aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2016-03-07 03:27:30 -0500
committerIngo Molnar <mingo@kernel.org>2016-03-07 03:27:30 -0500
commitec87e1cf7d8399d81d8965c6d852f8057a8dd687 (patch)
tree472a168fa4861090edf110c8a9712a5c15ea259f
parent869ae76147ffdf21ad24f0e599303cd58a2bb39f (diff)
parentf6cede5b49e822ebc41a099fe41ab4989f64e2cb (diff)
Merge tag 'v4.5-rc7' into x86/asm, to pick up SMAP fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/DocBook/media/v4l/media-types.xml4
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt5
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65217.txt10
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/watchdog/watchdog-parameters.txt4
-rw-r--r--MAINTAINERS41
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig18
-rw-r--r--arch/arc/Makefile4
-rw-r--r--arch/arc/configs/axs101_defconfig4
-rw-r--r--arch/arc/configs/axs103_defconfig10
-rw-r--r--arch/arc/configs/axs103_smp_defconfig10
-rw-r--r--arch/arc/configs/nsim_700_defconfig5
-rw-r--r--arch/arc/configs/nsim_hs_defconfig3
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig6
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig3
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig12
-rw-r--r--arch/arc/configs/tb10x_defconfig18
-rw-r--r--arch/arc/include/asm/arcregs.h32
-rw-r--r--arch/arc/include/asm/irq.h2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h11
-rw-r--r--arch/arc/kernel/entry-arcv2.S11
-rw-r--r--arch/arc/kernel/intc-compact.c3
-rw-r--r--arch/arc/kernel/mcip.c60
-rw-r--r--arch/arc/kernel/setup.c80
-rw-r--r--arch/arc/kernel/smp.c3
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts14
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts14
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts4
-rw-r--r--arch/arm/boot/dts/am335x-sl50.dts13
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-ds112.dts2
-rw-r--r--arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts31
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts1
-rw-r--r--arch/arm/boot/dts/sama5d2-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/tps65217.dtsi56
-rw-r--r--arch/arm/crypto/aes-ce-glue.c4
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h21
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kvm/guest.c2
-rw-r--r--arch/arm/kvm/mmio.c3
-rw-r--r--arch/arm/mach-omap2/board-generic.c22
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c6
-rw-r--r--arch/arm/mach-omap2/omap_device.c14
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S6
-rw-r--r--arch/arm/mach-shmobile/headsmp.S28
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c1
-rw-r--r--arch/arm/mach-shmobile/platsmp-scu.c4
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c2
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/pageattr.c3
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/Makefile4
-rw-r--r--arch/arm64/boot/install.sh14
-rw-r--r--arch/arm64/crypto/aes-glue.c4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h7
-rw-r--r--arch/arm64/kernel/debug-monitors.c48
-rw-r--r--arch/arm64/kernel/image.h1
-rw-r--r--arch/arm64/kernel/stacktrace.c17
-rw-r--r--arch/arm64/kernel/traps.c11
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/kvm/hyp-init.S12
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c20
-rw-r--r--arch/arm64/lib/strnlen.S2
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c9
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/mmap.c4
-rw-r--r--arch/m68k/configs/amiga_defconfig9
-rw-r--r--arch/m68k/configs/apollo_defconfig9
-rw-r--r--arch/m68k/configs/atari_defconfig9
-rw-r--r--arch/m68k/configs/bvme6000_defconfig9
-rw-r--r--arch/m68k/configs/hp300_defconfig9
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig9
-rw-r--r--arch/m68k/configs/mvme147_defconfig9
-rw-r--r--arch/m68k/configs/mvme16x_defconfig9
-rw-r--r--arch/m68k/configs/q40_defconfig9
-rw-r--r--arch/m68k/configs/sun3_defconfig9
-rw-r--r--arch/m68k/configs/sun3x_defconfig9
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/mips/jz4740/gpio.c2
-rw-r--r--arch/mips/kernel/r2300_fpu.S2
-rw-r--r--arch/mips/kernel/r4k_fpu.S2
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/mm/mmap.c4
-rw-r--r--arch/mips/mm/sc-mips.c13
-rw-r--r--arch/parisc/include/asm/floppy.h2
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/ptrace.c16
-rw-r--r--arch/parisc/kernel/syscall.S5
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/trace.h8
-rw-r--r--arch/powerpc/kernel/eeh_driver.c6
-rw-r--r--arch/powerpc/kernel/eeh_pe.c2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c3
-rw-r--r--arch/powerpc/kernel/module_64.c2
-rw-r--r--arch/powerpc/kernel/process.c4
-rw-r--r--arch/powerpc/mm/hash64_64k.c8
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c12
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c13
-rw-r--r--arch/powerpc/mm/mmap.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c32
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/s390/include/asm/fpu/internal.h2
-rw-r--r--arch/s390/include/asm/livepatch.h2
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/perf_event.c8
-rw-r--r--arch/s390/kernel/stacktrace.c47
-rw-r--r--arch/s390/kernel/trace.c3
-rw-r--r--arch/s390/mm/maccess.c12
-rw-r--r--arch/s390/oprofile/backtrace.c8
-rw-r--r--arch/sparc/Makefile6
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/entry.S17
-rw-r--r--arch/sparc/kernel/hvcalls.S3
-rw-r--r--arch/sparc/kernel/signal_64.c2
-rw-r--r--arch/sparc/kernel/sparc_ksyms_64.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/sparc/kernel/syscalls.S36
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/um/kernel/reboot.c1
-rw-r--r--arch/um/kernel/signal.c2
-rw-r--r--arch/x86/entry/entry_32.S1
-rw-r--r--arch/x86/entry/entry_64_compat.S1
-rw-r--r--arch/x86/include/asm/livepatch.h2
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/uaccess_32.h26
-rw-r--r--arch/x86/include/asm/xen/pci.h4
-rw-r--r--arch/x86/kernel/acpi/sleep.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c2
-rw-r--r--arch/x86/kvm/emulate.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c14
-rw-r--r--arch/x86/kvm/x86.c8
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/mm/pageattr.c14
-rw-r--r--arch/x86/pci/common.c26
-rw-r--r--arch/x86/pci/intel_mid_pci.c9
-rw-r--r--arch/x86/pci/irq.c23
-rw-r--r--arch/x86/pci/xen.c5
-rw-r--r--arch/x86/platform/intel-quark/imr.c4
-rw-r--r--arch/x86/um/os-Linux/task_size.c4
-rw-r--r--block/Kconfig13
-rw-r--r--block/bio.c9
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-map.c91
-rw-r--r--block/blk-merge.c8
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/blk-settings.c4
-rw-r--r--block/blk-sysfs.c5
-rw-r--r--block/deadline-iosched.c3
-rw-r--r--drivers/acpi/nfit.c105
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_link.c128
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c49
-rw-r--r--drivers/ata/ahci.h5
-rw-r--r--drivers/ata/ahci_xgene.c85
-rw-r--r--drivers/ata/libahci.c63
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/ata/pata_rb532_cf.c11
-rw-r--r--drivers/block/floppy.c67
-rw-r--r--drivers/block/null_blk.c8
-rw-r--r--drivers/block/xen-blkfront.c74
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c26
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c26
-rw-r--r--drivers/clk/tegra/clk-emc.c6
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-pll.c50
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c5
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c132
-rw-r--r--drivers/clk/ti/dpll3xxx.c3
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.arm4
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c1
-rw-r--r--drivers/devfreq/tegra-devfreq.c2
-rw-r--r--drivers/dma/dw/core.c15
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/edma.c41
-rw-r--r--drivers/dma/ioat/dma.c34
-rw-r--r--drivers/dma/pxa_dma.c8
-rw-r--r--drivers/gpio/gpio-rcar.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c44
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c49
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c112
-rw-r--r--drivers/gpu/drm/i915/intel_display.c86
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c32
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c7
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c153
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h1
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/gpio-fan.c7
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c63
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c41
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c18
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c63
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c18
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/rrpc.c4
-rw-r--r--drivers/lightnvm/rrpc.h5
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/usb/au0828/au0828-video.c3
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mtd/ubi/upd.c2
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/dsa/mv88e6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c27
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c64
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c74
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c299
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c71
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c34
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c184
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c69
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c16
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c105
-rw-r--r--drivers/net/geneve.c31
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/phy/bcm7xxx.c43
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c9
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c188
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c112
-rw-r--r--drivers/nvme/host/lightnvm.c12
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c160
-rw-r--r--drivers/of/irq.c9
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/pci-keystone-dw.c11
-rw-r--r--drivers/pci/host/pci-layerscape.c21
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c48
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c37
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c8
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c4
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/otg.c2
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c6
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c70
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.h15
-rw-r--r--drivers/usb/gadget/udc/udc-core.c3
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c20
-rw-r--r--drivers/usb/serial/Kconfig16
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c3
-rw-r--r--drivers/usb/serial/mxu11x0.c1006
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c9
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c2
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/sun4v_wdt.c191
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c80
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/affs/file.c5
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/block_dev.c19
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/root-tree.c10
-rw-r--r--fs/ceph/addr.c4
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c16
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifsfs.h12
-rw-r--r--fs/cifs/cifssmb.c21
-rw-r--r--fs/cifs/connect.c3
-rw-r--r--fs/cifs/smb2pdu.c24
-rw-r--r--fs/dax.c21
-rw-r--r--fs/dcache.c20
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/ext2/file.c19
-rw-r--r--fs/ext2/inode.c16
-rw-r--r--fs/ext4/balloc.c7
-rw-r--r--fs/ext4/crypto.c56
-rw-r--r--fs/ext4/dir.c13
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c28
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/inode.c78
-rw-r--r--fs/ext4/ioctl.c7
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/move_extent.c15
-rw-r--r--fs/ext4/namei.c26
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/fs-writeback.c39
-rw-r--r--fs/hpfs/namei.c31
-rw-r--r--fs/inode.c6
-rw-r--r--fs/jffs2/README.Locking5
-rw-r--r--fs/jffs2/build.c75
-rw-r--r--fs/jffs2/file.c39
-rw-r--r--fs/jffs2/gc.c17
-rw-r--r--fs/jffs2/nodelist.h6
-rw-r--r--fs/namei.c22
-rw-r--r--fs/nfs/blocklayout/extent_tree.c10
-rw-r--r--fs/nfs/nfs42proc.c119
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c55
-rw-r--r--fs/notify/mark.c53
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/read_write.c9
-rw-r--r--fs/super.c1
-rw-r--r--fs/userfaultfd.c6
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/xfs_aops.c6
-rw-r--r--fs/xfs/xfs_aops.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c3
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--include/drm/drm_crtc.h8
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/bio.h37
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/dax.h8
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/fsnotify_backend.h5
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/libnvdimm.h3
-rw-r--r--include/linux/lightnvm.h4
-rw-r--r--include/linux/mlx4/device.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2968
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci.h17
-rw-r--r--include/linux/perf_event.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/soc/ti/knav_dma.h4
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/uapi/linux/media.h54
-rw-r--r--include/uapi/linux/ndctl.h11
-rw-r--r--ipc/shm.c53
-rw-r--r--kernel/events/core.c372
-rw-r--r--kernel/memremap.c6
-rw-r--r--kernel/module.c4
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/trace/ftrace.c36
-rw-r--r--kernel/trace/trace_events.c17
-rw-r--r--kernel/trace/trace_events_filter.c13
-rw-r--r--kernel/trace/trace_stack.c6
-rw-r--r--mm/filemap.c12
-rw-r--r--mm/huge_memory.c13
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/memory.c14
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c34
-rw-r--r--mm/slab.c12
-rw-r--r--mm/slab.h1
-rw-r--r--mm/slab_common.c1
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c38
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/batman-adv/gateway_client.c7
-rw-r--r--net/batman-adv/hard-interface.c25
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/bluetooth/hci_core.c6
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/ceph/messenger.c15
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/flow_dissector.c7
-rw-r--r--net/dccp/ipv4.c14
-rw-r--r--net/dccp/ipv6.c14
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/inet_connection_sock.c14
-rw-r--r--net/ipv4/ip_gre.c5
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c77
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_ipv4.c20
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c74
-rw-r--r--net/ipv6/tcp_ipv6.c14
-rw-r--r--net/l2tp/l2tp_netlink.c18
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/nf_conntrack_core.c5
-rw-r--r--net/netfilter/nfnetlink.c16
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nft_counter.c4
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sctp/protocol.c46
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c2
-rw-r--r--net/tipc/link.c4
-rw-r--r--net/tipc/node.c12
-rw-r--r--net/unix/af_unix.c26
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/vmw_vsock/af_vsock.c19
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/core/control_compat.c90
-rw-r--r--sound/core/pcm_compat.c177
-rw-r--r--sound/core/pcm_native.c16
-rw-r--r--sound/core/rawmidi_compat.c56
-rw-r--r--sound/core/seq/oss/seq_oss.c2
-rw-r--r--sound/core/seq/oss/seq_oss_device.h1
-rw-r--r--sound/core/seq/oss/seq_oss_init.c16
-rw-r--r--sound/core/seq/seq_memory.c13
-rw-r--r--sound/core/seq/seq_ports.c13
-rw-r--r--sound/core/timer_compat.c18
-rw-r--r--sound/hda/hdac_controller.c7
-rw-r--r--sound/pci/hda/hda_controller.c47
-rw-r--r--sound/pci/hda/hda_intel.c20
-rw-r--r--sound/pci/hda/patch_hdmi.c19
-rw-r--r--sound/pci/hda/patch_realtek.c40
-rw-r--r--sound/pci/rme9652/hdsp.c4
-rw-r--r--sound/pci/rme9652/hdspm.c16
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/testing/nvdimm/test/nfit.c8
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc15
-rw-r--r--virt/kvm/arm/arch_timer.c9
-rw-r--r--virt/kvm/arm/vgic.c4
-rw-r--r--virt/kvm/async_pf.c2
649 files changed, 8473 insertions, 5775 deletions
diff --git a/Documentation/DocBook/media/v4l/media-types.xml b/Documentation/DocBook/media/v4l/media-types.xml
index 1af384250910..0ee0f3386cdf 100644
--- a/Documentation/DocBook/media/v4l/media-types.xml
+++ b/Documentation/DocBook/media/v4l/media-types.xml
@@ -57,10 +57,6 @@
57 <entry>Connector for a RGB composite signal.</entry> 57 <entry>Connector for a RGB composite signal.</entry>
58 </row> 58 </row>
59 <row> 59 <row>
60 <entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry>
61 <entry>Connector for a test generator.</entry>
62 </row>
63 <row>
64 <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry> 60 <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
65 <entry>Camera video sensor entity.</entry> 61 <entry>Camera video sensor entity.</entry>
66 </row> 62 </row>
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
index ace05992a262..20df350b9ef3 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
@@ -30,7 +30,7 @@ that they are defined using standard clock bindings with following
30clock-output-names: 30clock-output-names:
31 - "xin24m" - crystal input - required, 31 - "xin24m" - crystal input - required,
32 - "ext_i2s" - external I2S clock - optional, 32 - "ext_i2s" - external I2S clock - optional,
33 - "ext_gmac" - external GMAC clock - optional 33 - "rmii_clkin" - external EMAC clock - optional
34 34
35Example: Clock controller node: 35Example: Clock controller node:
36 36
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 7803e77d85cb..007a5b46256a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -24,9 +24,8 @@ Main node required properties:
24 1 = edge triggered 24 1 = edge triggered
25 4 = level triggered 25 4 = level triggered
26 26
27 Cells 4 and beyond are reserved for future use. When the 1st cell 27 Cells 4 and beyond are reserved for future use and must have a value
28 has a value of 0 or 1, cells 4 and beyond act as padding, and may be 28 of 0 if present.
29 ignored. It is recommended that padding cells have a value of 0.
30 29
31- reg : Specifies base physical address(s) and size of the GIC 30- reg : Specifies base physical address(s) and size of the GIC
32 registers, in the following order: 31 registers, in the following order:
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 81a9f9e6b45f..c8ac222eac67 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -82,8 +82,8 @@ Example:
82 "ch16", "ch17", "ch18", "ch19", 82 "ch16", "ch17", "ch18", "ch19",
83 "ch20", "ch21", "ch22", "ch23", 83 "ch20", "ch21", "ch22", "ch23",
84 "ch24"; 84 "ch24";
85 clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>; 85 clocks = <&cpg CPG_MOD 812>;
86 power-domains = <&cpg_clocks>; 86 power-domains = <&cpg>;
87 phy-mode = "rgmii-id"; 87 phy-mode = "rgmii-id";
88 phy-handle = <&phy0>; 88 phy-handle = <&phy0>;
89 89
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d18109657da6..4f05d208c95c 100644
--- a/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -26,11 +26,7 @@ Example:
26 ti,pmic-shutdown-controller; 26 ti,pmic-shutdown-controller;
27 27
28 regulators { 28 regulators {
29 #address-cells = <1>;
30 #size-cells = <0>;
31
32 dcdc1_reg: dcdc1 { 29 dcdc1_reg: dcdc1 {
33 reg = <0>;
34 regulator-min-microvolt = <900000>; 30 regulator-min-microvolt = <900000>;
35 regulator-max-microvolt = <1800000>; 31 regulator-max-microvolt = <1800000>;
36 regulator-boot-on; 32 regulator-boot-on;
@@ -38,7 +34,6 @@ Example:
38 }; 34 };
39 35
40 dcdc2_reg: dcdc2 { 36 dcdc2_reg: dcdc2 {
41 reg = <1>;
42 regulator-min-microvolt = <900000>; 37 regulator-min-microvolt = <900000>;
43 regulator-max-microvolt = <3300000>; 38 regulator-max-microvolt = <3300000>;
44 regulator-boot-on; 39 regulator-boot-on;
@@ -46,7 +41,6 @@ Example:
46 }; 41 };
47 42
48 dcdc3_reg: dcc3 { 43 dcdc3_reg: dcc3 {
49 reg = <2>;
50 regulator-min-microvolt = <900000>; 44 regulator-min-microvolt = <900000>;
51 regulator-max-microvolt = <1500000>; 45 regulator-max-microvolt = <1500000>;
52 regulator-boot-on; 46 regulator-boot-on;
@@ -54,7 +48,6 @@ Example:
54 }; 48 };
55 49
56 ldo1_reg: ldo1 { 50 ldo1_reg: ldo1 {
57 reg = <3>;
58 regulator-min-microvolt = <1000000>; 51 regulator-min-microvolt = <1000000>;
59 regulator-max-microvolt = <3300000>; 52 regulator-max-microvolt = <3300000>;
60 regulator-boot-on; 53 regulator-boot-on;
@@ -62,7 +55,6 @@ Example:
62 }; 55 };
63 56
64 ldo2_reg: ldo2 { 57 ldo2_reg: ldo2 {
65 reg = <4>;
66 regulator-min-microvolt = <900000>; 58 regulator-min-microvolt = <900000>;
67 regulator-max-microvolt = <3300000>; 59 regulator-max-microvolt = <3300000>;
68 regulator-boot-on; 60 regulator-boot-on;
@@ -70,7 +62,6 @@ Example:
70 }; 62 };
71 63
72 ldo3_reg: ldo3 { 64 ldo3_reg: ldo3 {
73 reg = <5>;
74 regulator-min-microvolt = <1800000>; 65 regulator-min-microvolt = <1800000>;
75 regulator-max-microvolt = <3300000>; 66 regulator-max-microvolt = <3300000>;
76 regulator-boot-on; 67 regulator-boot-on;
@@ -78,7 +69,6 @@ Example:
78 }; 69 };
79 70
80 ldo4_reg: ldo4 { 71 ldo4_reg: ldo4 {
81 reg = <6>;
82 regulator-min-microvolt = <1800000>; 72 regulator-min-microvolt = <1800000>;
83 regulator-max-microvolt = <3300000>; 73 regulator-max-microvolt = <3300000>;
84 regulator-boot-on; 74 regulator-boot-on;
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index ac2fcd6ff4b8..1068ffce9f91 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -14,6 +14,10 @@ Required properties:
14 interrupt number is the rtc alarm interrupt and second interrupt number 14 interrupt number is the rtc alarm interrupt and second interrupt number
15 is the rtc tick interrupt. The number of cells representing a interrupt 15 is the rtc tick interrupt. The number of cells representing a interrupt
16 depends on the parent interrupt controller. 16 depends on the parent interrupt controller.
17- clocks: Must contain a list of phandle and clock specifier for the rtc
18 and source clocks.
19- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
20 same order as the clocks property.
17 21
18Example: 22Example:
19 23
@@ -21,4 +25,6 @@ Example:
21 compatible = "samsung,s3c6410-rtc"; 25 compatible = "samsung,s3c6410-rtc";
22 reg = <0x10070000 0x100>; 26 reg = <0x10070000 0x100>;
23 interrupts = <44 0 45 0>; 27 interrupts = <44 0 45 0>;
28 clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
29 clock-names = "rtc", "rtc_src";
24 }; 30 };
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 35ae1fb3537f..ed94c217c98d 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,7 @@ Optional properties:
9- fsl,uart-has-rtscts : Indicate the uart has rts and cts 9- fsl,uart-has-rtscts : Indicate the uart has rts and cts
10- fsl,irda-mode : Indicate the uart supports irda mode 10- fsl,irda-mode : Indicate the uart supports irda mode
11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works 11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
12 is DCE mode by default. 12 in DCE mode by default.
13 13
14Note: Each uart controller should have an alias correctly numbered 14Note: Each uart controller should have an alias correctly numbered
15in "aliases" node. 15in "aliases" node.
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 9f9ec9f76039..4e4b6f10d841 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -400,3 +400,7 @@ wm8350_wdt:
400nowayout: Watchdog cannot be stopped once started 400nowayout: Watchdog cannot be stopped once started
401 (default=kernel config parameter) 401 (default=kernel config parameter)
402------------------------------------------------- 402-------------------------------------------------
403sun4v_wdt:
404timeout_ms: Watchdog timeout in milliseconds 1..180000, default=60000)
405nowayout: Watchdog cannot be stopped once started
406-------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 28eb61bbecf4..f5e6a535bc34 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -920,17 +920,24 @@ M: Emilio López <emilio@elopez.com.ar>
920S: Maintained 920S: Maintained
921F: drivers/clk/sunxi/ 921F: drivers/clk/sunxi/
922 922
923ARM/Amlogic MesonX SoC support 923ARM/Amlogic Meson SoC support
924M: Carlo Caione <carlo@caione.org> 924M: Carlo Caione <carlo@caione.org>
925L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 925L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
926L: linux-meson@googlegroups.com
927W: http://linux-meson.com/
926S: Maintained 928S: Maintained
927F: drivers/media/rc/meson-ir.c 929F: arch/arm/mach-meson/
928N: meson[x68] 930F: arch/arm/boot/dts/meson*
931N: meson
929 932
930ARM/Annapurna Labs ALPINE ARCHITECTURE 933ARM/Annapurna Labs ALPINE ARCHITECTURE
931M: Tsahee Zidenberg <tsahee@annapurnalabs.com> 934M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
935M: Antoine Tenart <antoine.tenart@free-electrons.com>
932S: Maintained 936S: Maintained
933F: arch/arm/mach-alpine/ 937F: arch/arm/mach-alpine/
938F: arch/arm/boot/dts/alpine*
939F: arch/arm64/boot/dts/al/
940F: drivers/*/*alpine*
934 941
935ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT 942ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
936M: Nicolas Ferre <nicolas.ferre@atmel.com> 943M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -3444,7 +3451,6 @@ F: drivers/usb/dwc2/
3444DESIGNWARE USB3 DRD IP DRIVER 3451DESIGNWARE USB3 DRD IP DRIVER
3445M: Felipe Balbi <balbi@kernel.org> 3452M: Felipe Balbi <balbi@kernel.org>
3446L: linux-usb@vger.kernel.org 3453L: linux-usb@vger.kernel.org
3447L: linux-omap@vger.kernel.org
3448T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 3454T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
3449S: Maintained 3455S: Maintained
3450F: drivers/usb/dwc3/ 3456F: drivers/usb/dwc3/
@@ -4512,6 +4518,12 @@ L: linuxppc-dev@lists.ozlabs.org
4512S: Maintained 4518S: Maintained
4513F: drivers/dma/fsldma.* 4519F: drivers/dma/fsldma.*
4514 4520
4521FREESCALE GPMI NAND DRIVER
4522M: Han Xu <han.xu@nxp.com>
4523L: linux-mtd@lists.infradead.org
4524S: Maintained
4525F: drivers/mtd/nand/gpmi-nand/*
4526
4515FREESCALE I2C CPM DRIVER 4527FREESCALE I2C CPM DRIVER
4516M: Jochen Friedrich <jochen@scram.de> 4528M: Jochen Friedrich <jochen@scram.de>
4517L: linuxppc-dev@lists.ozlabs.org 4529L: linuxppc-dev@lists.ozlabs.org
@@ -4528,7 +4540,7 @@ F: include/linux/platform_data/video-imxfb.h
4528F: drivers/video/fbdev/imxfb.c 4540F: drivers/video/fbdev/imxfb.c
4529 4541
4530FREESCALE QUAD SPI DRIVER 4542FREESCALE QUAD SPI DRIVER
4531M: Han Xu <han.xu@freescale.com> 4543M: Han Xu <han.xu@nxp.com>
4532L: linux-mtd@lists.infradead.org 4544L: linux-mtd@lists.infradead.org
4533S: Maintained 4545S: Maintained
4534F: drivers/mtd/spi-nor/fsl-quadspi.c 4546F: drivers/mtd/spi-nor/fsl-quadspi.c
@@ -6128,7 +6140,7 @@ F: include/uapi/linux/sunrpc/
6128 6140
6129KERNEL SELFTEST FRAMEWORK 6141KERNEL SELFTEST FRAMEWORK
6130M: Shuah Khan <shuahkh@osg.samsung.com> 6142M: Shuah Khan <shuahkh@osg.samsung.com>
6131L: linux-api@vger.kernel.org 6143L: linux-kselftest@vger.kernel.org
6132T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest 6144T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
6133S: Maintained 6145S: Maintained
6134F: tools/testing/selftests 6146F: tools/testing/selftests
@@ -7354,7 +7366,7 @@ F: drivers/tty/isicom.c
7354F: include/linux/isicom.h 7366F: include/linux/isicom.h
7355 7367
7356MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER 7368MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
7357M: Felipe Balbi <balbi@kernel.org> 7369M: Bin Liu <b-liu@ti.com>
7358L: linux-usb@vger.kernel.org 7370L: linux-usb@vger.kernel.org
7359T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 7371T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
7360S: Maintained 7372S: Maintained
@@ -7686,13 +7698,13 @@ S: Maintained
7686F: arch/nios2/ 7698F: arch/nios2/
7687 7699
7688NOKIA N900 POWER SUPPLY DRIVERS 7700NOKIA N900 POWER SUPPLY DRIVERS
7689M: Pali Rohár <pali.rohar@gmail.com> 7701R: Pali Rohár <pali.rohar@gmail.com>
7690S: Maintained
7691F: include/linux/power/bq2415x_charger.h 7702F: include/linux/power/bq2415x_charger.h
7692F: include/linux/power/bq27xxx_battery.h 7703F: include/linux/power/bq27xxx_battery.h
7693F: include/linux/power/isp1704_charger.h 7704F: include/linux/power/isp1704_charger.h
7694F: drivers/power/bq2415x_charger.c 7705F: drivers/power/bq2415x_charger.c
7695F: drivers/power/bq27xxx_battery.c 7706F: drivers/power/bq27xxx_battery.c
7707F: drivers/power/bq27xxx_battery_i2c.c
7696F: drivers/power/isp1704_charger.c 7708F: drivers/power/isp1704_charger.c
7697F: drivers/power/rx51_battery.c 7709F: drivers/power/rx51_battery.c
7698 7710
@@ -7923,11 +7935,9 @@ F: drivers/media/platform/omap3isp/
7923F: drivers/staging/media/omap4iss/ 7935F: drivers/staging/media/omap4iss/
7924 7936
7925OMAP USB SUPPORT 7937OMAP USB SUPPORT
7926M: Felipe Balbi <balbi@kernel.org>
7927L: linux-usb@vger.kernel.org 7938L: linux-usb@vger.kernel.org
7928L: linux-omap@vger.kernel.org 7939L: linux-omap@vger.kernel.org
7929T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 7940S: Orphan
7930S: Maintained
7931F: drivers/usb/*/*omap* 7941F: drivers/usb/*/*omap*
7932F: arch/arm/*omap*/usb* 7942F: arch/arm/*omap*/usb*
7933 7943
@@ -9558,6 +9568,12 @@ M: Andreas Noever <andreas.noever@gmail.com>
9558S: Maintained 9568S: Maintained
9559F: drivers/thunderbolt/ 9569F: drivers/thunderbolt/
9560 9570
9571TI BQ27XXX POWER SUPPLY DRIVER
9572R: Andrew F. Davis <afd@ti.com>
9573F: include/linux/power/bq27xxx_battery.h
9574F: drivers/power/bq27xxx_battery.c
9575F: drivers/power/bq27xxx_battery_i2c.c
9576
9561TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER 9577TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
9562M: John Stultz <john.stultz@linaro.org> 9578M: John Stultz <john.stultz@linaro.org>
9563M: Thomas Gleixner <tglx@linutronix.de> 9579M: Thomas Gleixner <tglx@linutronix.de>
@@ -12013,7 +12029,6 @@ F: arch/arm64/xen/
12013F: arch/arm64/include/asm/xen/ 12029F: arch/arm64/include/asm/xen/
12014 12030
12015XEN NETWORK BACKEND DRIVER 12031XEN NETWORK BACKEND DRIVER
12016M: Ian Campbell <ian.campbell@citrix.com>
12017M: Wei Liu <wei.liu2@citrix.com> 12032M: Wei Liu <wei.liu2@citrix.com>
12018L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12033L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12019L: netdev@vger.kernel.org 12034L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 701c36056ca8..2d519d2fb3a9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 5 2PATCHLEVEL = 5
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc7
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0655495470ad..8a188bc1786a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -12,8 +12,6 @@ config ARC
12 select BUILDTIME_EXTABLE_SORT 12 select BUILDTIME_EXTABLE_SORT
13 select COMMON_CLK 13 select COMMON_CLK
14 select CLONE_BACKWARDS 14 select CLONE_BACKWARDS
15 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
16 select DEVTMPFS if !INITRAMFS_SOURCE=""
17 select GENERIC_ATOMIC64 15 select GENERIC_ATOMIC64
18 select GENERIC_CLOCKEVENTS 16 select GENERIC_CLOCKEVENTS
19 select GENERIC_FIND_FIRST_BIT 17 select GENERIC_FIND_FIRST_BIT
@@ -275,14 +273,6 @@ config ARC_DCCM_BASE
275 default "0xA0000000" 273 default "0xA0000000"
276 depends on ARC_HAS_DCCM 274 depends on ARC_HAS_DCCM
277 275
278config ARC_HAS_HW_MPY
279 bool "Use Hardware Multiplier (Normal or Faster XMAC)"
280 default y
281 help
282 Influences how gcc generates code for MPY operations.
283 If enabled, MPYxx insns are generated, provided by Standard/XMAC
284 Multipler. Otherwise software multipy lib is used
285
286choice 276choice
287 prompt "MMU Version" 277 prompt "MMU Version"
288 default ARC_MMU_V3 if ARC_CPU_770 278 default ARC_MMU_V3 if ARC_CPU_770
@@ -542,14 +532,6 @@ config ARC_DBG_TLB_MISS_COUNT
542 Counts number of I and D TLB Misses and exports them via Debugfs 532 Counts number of I and D TLB Misses and exports them via Debugfs
543 The counters can be cleared via Debugfs as well 533 The counters can be cleared via Debugfs as well
544 534
545if SMP
546
547config ARC_IPI_DBG
548 bool "Debug Inter Core interrupts"
549 default n
550
551endif
552
553endif 535endif
554 536
555config ARC_UBOOT_SUPPORT 537config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aeb19021099e..c8230f3395f2 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -74,10 +74,6 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
74# --build-id w/o "-marclinux". Default arc-elf32-ld is OK 74# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
75ldflags-$(upto_gcc44) += -marclinux 75ldflags-$(upto_gcc44) += -marclinux
76 76
77ifndef CONFIG_ARC_HAS_HW_MPY
78 cflags-y += -mno-mpy
79endif
80
81LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 77LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
82 78
83# Modules with short calls might break for calls into builtin-kernel 79# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index f1ac9818b751..5d4e2a07ad3e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -39,6 +39,7 @@ CONFIG_IP_PNP_RARP=y
39# CONFIG_INET_XFRM_MODE_TUNNEL is not set 39# CONFIG_INET_XFRM_MODE_TUNNEL is not set
40# CONFIG_INET_XFRM_MODE_BEET is not set 40# CONFIG_INET_XFRM_MODE_BEET is not set
41# CONFIG_IPV6 is not set 41# CONFIG_IPV6 is not set
42CONFIG_DEVTMPFS=y
42# CONFIG_STANDALONE is not set 43# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 44# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44# CONFIG_FIRMWARE_IN_KERNEL is not set 45# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -73,7 +74,6 @@ CONFIG_I2C_CHARDEV=y
73CONFIG_I2C_DESIGNWARE_PLATFORM=y 74CONFIG_I2C_DESIGNWARE_PLATFORM=y
74# CONFIG_HWMON is not set 75# CONFIG_HWMON is not set
75CONFIG_FB=y 76CONFIG_FB=y
76# CONFIG_VGA_CONSOLE is not set
77CONFIG_FRAMEBUFFER_CONSOLE=y 77CONFIG_FRAMEBUFFER_CONSOLE=y
78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
79CONFIG_LOGO=y 79CONFIG_LOGO=y
@@ -91,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
91CONFIG_MMC_DW=y 91CONFIG_MMC_DW=y
92# CONFIG_IOMMU_SUPPORT is not set 92# CONFIG_IOMMU_SUPPORT is not set
93CONFIG_EXT3_FS=y 93CONFIG_EXT3_FS=y
94CONFIG_EXT4_FS=y
95CONFIG_MSDOS_FS=y 94CONFIG_MSDOS_FS=y
96CONFIG_VFAT_FS=y 95CONFIG_VFAT_FS=y
97CONFIG_NTFS_FS=y 96CONFIG_NTFS_FS=y
98CONFIG_TMPFS=y 97CONFIG_TMPFS=y
99CONFIG_JFFS2_FS=y
100CONFIG_NFS_FS=y 98CONFIG_NFS_FS=y
101CONFIG_NLS_CODEPAGE_437=y 99CONFIG_NLS_CODEPAGE_437=y
102CONFIG_NLS_ISO8859_1=y 100CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 323486d6ee83..87ee46b237ef 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -39,14 +39,10 @@ CONFIG_IP_PNP_RARP=y
39# CONFIG_INET_XFRM_MODE_TUNNEL is not set 39# CONFIG_INET_XFRM_MODE_TUNNEL is not set
40# CONFIG_INET_XFRM_MODE_BEET is not set 40# CONFIG_INET_XFRM_MODE_BEET is not set
41# CONFIG_IPV6 is not set 41# CONFIG_IPV6 is not set
42CONFIG_DEVTMPFS=y
42# CONFIG_STANDALONE is not set 43# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 44# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44# CONFIG_FIRMWARE_IN_KERNEL is not set 45# CONFIG_FIRMWARE_IN_KERNEL is not set
45CONFIG_MTD=y
46CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_BLOCK=y
48CONFIG_MTD_NAND=y
49CONFIG_MTD_NAND_AXS=y
50CONFIG_SCSI=y 46CONFIG_SCSI=y
51CONFIG_BLK_DEV_SD=y 47CONFIG_BLK_DEV_SD=y
52CONFIG_NETDEVICES=y 48CONFIG_NETDEVICES=y
@@ -78,14 +74,12 @@ CONFIG_I2C_CHARDEV=y
78CONFIG_I2C_DESIGNWARE_PLATFORM=y 74CONFIG_I2C_DESIGNWARE_PLATFORM=y
79# CONFIG_HWMON is not set 75# CONFIG_HWMON is not set
80CONFIG_FB=y 76CONFIG_FB=y
81# CONFIG_VGA_CONSOLE is not set
82CONFIG_FRAMEBUFFER_CONSOLE=y 77CONFIG_FRAMEBUFFER_CONSOLE=y
83CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
84CONFIG_LOGO=y 79CONFIG_LOGO=y
85# CONFIG_LOGO_LINUX_MONO is not set 80# CONFIG_LOGO_LINUX_MONO is not set
86# CONFIG_LOGO_LINUX_VGA16 is not set 81# CONFIG_LOGO_LINUX_VGA16 is not set
87# CONFIG_LOGO_LINUX_CLUT224 is not set 82# CONFIG_LOGO_LINUX_CLUT224 is not set
88CONFIG_USB=y
89CONFIG_USB_EHCI_HCD=y 83CONFIG_USB_EHCI_HCD=y
90CONFIG_USB_EHCI_HCD_PLATFORM=y 84CONFIG_USB_EHCI_HCD_PLATFORM=y
91CONFIG_USB_OHCI_HCD=y 85CONFIG_USB_OHCI_HCD=y
@@ -97,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
97CONFIG_MMC_DW=y 91CONFIG_MMC_DW=y
98# CONFIG_IOMMU_SUPPORT is not set 92# CONFIG_IOMMU_SUPPORT is not set
99CONFIG_EXT3_FS=y 93CONFIG_EXT3_FS=y
100CONFIG_EXT4_FS=y
101CONFIG_MSDOS_FS=y 94CONFIG_MSDOS_FS=y
102CONFIG_VFAT_FS=y 95CONFIG_VFAT_FS=y
103CONFIG_NTFS_FS=y 96CONFIG_NTFS_FS=y
104CONFIG_TMPFS=y 97CONFIG_TMPFS=y
105CONFIG_JFFS2_FS=y
106CONFIG_NFS_FS=y 98CONFIG_NFS_FS=y
107CONFIG_NLS_CODEPAGE_437=y 99CONFIG_NLS_CODEPAGE_437=y
108CONFIG_NLS_ISO8859_1=y 100CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 66191cd0447e..d80daf4f7e73 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -40,14 +40,10 @@ CONFIG_IP_PNP_RARP=y
40# CONFIG_INET_XFRM_MODE_TUNNEL is not set 40# CONFIG_INET_XFRM_MODE_TUNNEL is not set
41# CONFIG_INET_XFRM_MODE_BEET is not set 41# CONFIG_INET_XFRM_MODE_BEET is not set
42# CONFIG_IPV6 is not set 42# CONFIG_IPV6 is not set
43CONFIG_DEVTMPFS=y
43# CONFIG_STANDALONE is not set 44# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 45# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45# CONFIG_FIRMWARE_IN_KERNEL is not set 46# CONFIG_FIRMWARE_IN_KERNEL is not set
46CONFIG_MTD=y
47CONFIG_MTD_CMDLINE_PARTS=y
48CONFIG_MTD_BLOCK=y
49CONFIG_MTD_NAND=y
50CONFIG_MTD_NAND_AXS=y
51CONFIG_SCSI=y 47CONFIG_SCSI=y
52CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
53CONFIG_NETDEVICES=y 49CONFIG_NETDEVICES=y
@@ -79,14 +75,12 @@ CONFIG_I2C_CHARDEV=y
79CONFIG_I2C_DESIGNWARE_PLATFORM=y 75CONFIG_I2C_DESIGNWARE_PLATFORM=y
80# CONFIG_HWMON is not set 76# CONFIG_HWMON is not set
81CONFIG_FB=y 77CONFIG_FB=y
82# CONFIG_VGA_CONSOLE is not set
83CONFIG_FRAMEBUFFER_CONSOLE=y 78CONFIG_FRAMEBUFFER_CONSOLE=y
84CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 79CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
85CONFIG_LOGO=y 80CONFIG_LOGO=y
86# CONFIG_LOGO_LINUX_MONO is not set 81# CONFIG_LOGO_LINUX_MONO is not set
87# CONFIG_LOGO_LINUX_VGA16 is not set 82# CONFIG_LOGO_LINUX_VGA16 is not set
88# CONFIG_LOGO_LINUX_CLUT224 is not set 83# CONFIG_LOGO_LINUX_CLUT224 is not set
89CONFIG_USB=y
90CONFIG_USB_EHCI_HCD=y 84CONFIG_USB_EHCI_HCD=y
91CONFIG_USB_EHCI_HCD_PLATFORM=y 85CONFIG_USB_EHCI_HCD_PLATFORM=y
92CONFIG_USB_OHCI_HCD=y 86CONFIG_USB_OHCI_HCD=y
@@ -98,12 +92,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
98CONFIG_MMC_DW=y 92CONFIG_MMC_DW=y
99# CONFIG_IOMMU_SUPPORT is not set 93# CONFIG_IOMMU_SUPPORT is not set
100CONFIG_EXT3_FS=y 94CONFIG_EXT3_FS=y
101CONFIG_EXT4_FS=y
102CONFIG_MSDOS_FS=y 95CONFIG_MSDOS_FS=y
103CONFIG_VFAT_FS=y 96CONFIG_VFAT_FS=y
104CONFIG_NTFS_FS=y 97CONFIG_NTFS_FS=y
105CONFIG_TMPFS=y 98CONFIG_TMPFS=y
106CONFIG_JFFS2_FS=y
107CONFIG_NFS_FS=y 99CONFIG_NFS_FS=y
108CONFIG_NLS_CODEPAGE_437=y 100CONFIG_NLS_CODEPAGE_437=y
109CONFIG_NLS_ISO8859_1=y 101CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 138f9d887957..f41095340b6a 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -4,6 +4,7 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
5CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7# CONFIG_CROSS_MEMORY_ATTACH is not set
7CONFIG_HIGH_RES_TIMERS=y 8CONFIG_HIGH_RES_TIMERS=y
8CONFIG_IKCONFIG=y 9CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y 10CONFIG_IKCONFIG_PROC=y
@@ -26,7 +27,6 @@ CONFIG_ARC_PLAT_SIM=y
26CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700" 27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
27CONFIG_PREEMPT=y 28CONFIG_PREEMPT=y
28# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
29# CONFIG_CROSS_MEMORY_ATTACH is not set
30CONFIG_NET=y 30CONFIG_NET=y
31CONFIG_PACKET=y 31CONFIG_PACKET=y
32CONFIG_UNIX=y 32CONFIG_UNIX=y
@@ -34,6 +34,7 @@ CONFIG_UNIX_DIAG=y
34CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
35CONFIG_INET=y 35CONFIG_INET=y
36# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
37# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
38# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
39# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -51,7 +52,6 @@ CONFIG_SERIAL_ARC=y
51CONFIG_SERIAL_ARC_CONSOLE=y 52CONFIG_SERIAL_ARC_CONSOLE=y
52# CONFIG_HW_RANDOM is not set 53# CONFIG_HW_RANDOM is not set
53# CONFIG_HWMON is not set 54# CONFIG_HWMON is not set
54# CONFIG_VGA_CONSOLE is not set
55# CONFIG_HID is not set 55# CONFIG_HID is not set
56# CONFIG_USB_SUPPORT is not set 56# CONFIG_USB_SUPPORT is not set
57# CONFIG_IOMMU_SUPPORT is not set 57# CONFIG_IOMMU_SUPPORT is not set
@@ -63,4 +63,3 @@ CONFIG_NFS_FS=y
63# CONFIG_ENABLE_WARN_DEPRECATED is not set 63# CONFIG_ENABLE_WARN_DEPRECATED is not set
64# CONFIG_ENABLE_MUST_CHECK is not set 64# CONFIG_ENABLE_MUST_CHECK is not set
65# CONFIG_DEBUG_PREEMPT is not set 65# CONFIG_DEBUG_PREEMPT is not set
66CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f68838e8068a..cfaa33cb5921 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -35,6 +35,7 @@ CONFIG_UNIX_DIAG=y
35CONFIG_NET_KEY=y 35CONFIG_NET_KEY=y
36CONFIG_INET=y 36CONFIG_INET=y
37# CONFIG_IPV6 is not set 37# CONFIG_IPV6 is not set
38CONFIG_DEVTMPFS=y
38# CONFIG_STANDALONE is not set 39# CONFIG_STANDALONE is not set
39# CONFIG_PREVENT_FIRMWARE_BUILD is not set 40# CONFIG_PREVENT_FIRMWARE_BUILD is not set
40# CONFIG_FIRMWARE_IN_KERNEL is not set 41# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +50,6 @@ CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 50CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 51# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 52# CONFIG_HWMON is not set
52# CONFIG_VGA_CONSOLE is not set
53# CONFIG_HID is not set 53# CONFIG_HID is not set
54# CONFIG_USB_SUPPORT is not set 54# CONFIG_USB_SUPPORT is not set
55# CONFIG_IOMMU_SUPPORT is not set 55# CONFIG_IOMMU_SUPPORT is not set
@@ -61,4 +61,3 @@ CONFIG_NFS_FS=y
61# CONFIG_ENABLE_WARN_DEPRECATED is not set 61# CONFIG_ENABLE_WARN_DEPRECATED is not set
62# CONFIG_ENABLE_MUST_CHECK is not set 62# CONFIG_ENABLE_MUST_CHECK is not set
63# CONFIG_DEBUG_PREEMPT is not set 63# CONFIG_DEBUG_PREEMPT is not set
64CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 96bd1c20fb0b..bb2a8dc778b5 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
5# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_HIGH_RES_TIMERS=y 6CONFIG_HIGH_RES_TIMERS=y
6CONFIG_IKCONFIG=y 7CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 8CONFIG_IKCONFIG_PROC=y
@@ -21,13 +22,11 @@ CONFIG_MODULES=y
21# CONFIG_IOSCHED_DEADLINE is not set 22# CONFIG_IOSCHED_DEADLINE is not set
22# CONFIG_IOSCHED_CFQ is not set 23# CONFIG_IOSCHED_CFQ is not set
23CONFIG_ARC_PLAT_SIM=y 24CONFIG_ARC_PLAT_SIM=y
24CONFIG_ARC_BOARD_ML509=y
25CONFIG_ISA_ARCV2=y 25CONFIG_ISA_ARCV2=y
26CONFIG_SMP=y 26CONFIG_SMP=y
27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu" 27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
28CONFIG_PREEMPT=y 28CONFIG_PREEMPT=y
29# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
30# CONFIG_CROSS_MEMORY_ATTACH is not set
31CONFIG_NET=y 30CONFIG_NET=y
32CONFIG_PACKET=y 31CONFIG_PACKET=y
33CONFIG_UNIX=y 32CONFIG_UNIX=y
@@ -35,6 +34,7 @@ CONFIG_UNIX_DIAG=y
35CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
36CONFIG_INET=y 35CONFIG_INET=y
37# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
38# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
39# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
40# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +49,6 @@ CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 49CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 50# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
52# CONFIG_VGA_CONSOLE is not set
53# CONFIG_HID is not set 52# CONFIG_HID is not set
54# CONFIG_USB_SUPPORT is not set 53# CONFIG_USB_SUPPORT is not set
55# CONFIG_IOMMU_SUPPORT is not set 54# CONFIG_IOMMU_SUPPORT is not set
@@ -60,4 +59,3 @@ CONFIG_TMPFS=y
60CONFIG_NFS_FS=y 59CONFIG_NFS_FS=y
61# CONFIG_ENABLE_WARN_DEPRECATED is not set 60# CONFIG_ENABLE_WARN_DEPRECATED is not set
62# CONFIG_ENABLE_MUST_CHECK is not set 61# CONFIG_ENABLE_MUST_CHECK is not set
63CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 31e1d95764ff..646182e93753 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -33,6 +33,7 @@ CONFIG_UNIX_DIAG=y
33CONFIG_NET_KEY=y 33CONFIG_NET_KEY=y
34CONFIG_INET=y 34CONFIG_INET=y
35# CONFIG_IPV6 is not set 35# CONFIG_IPV6 is not set
36CONFIG_DEVTMPFS=y
36# CONFIG_STANDALONE is not set 37# CONFIG_STANDALONE is not set
37# CONFIG_PREVENT_FIRMWARE_BUILD is not set 38# CONFIG_PREVENT_FIRMWARE_BUILD is not set
38# CONFIG_FIRMWARE_IN_KERNEL is not set 39# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -58,7 +59,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_HW_RANDOM is not set 59# CONFIG_HW_RANDOM is not set
59# CONFIG_HWMON is not set 60# CONFIG_HWMON is not set
60CONFIG_FB=y 61CONFIG_FB=y
61# CONFIG_VGA_CONSOLE is not set
62CONFIG_FRAMEBUFFER_CONSOLE=y 62CONFIG_FRAMEBUFFER_CONSOLE=y
63CONFIG_LOGO=y 63CONFIG_LOGO=y
64# CONFIG_HID is not set 64# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index fcae66683ca0..ceca2541950d 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -34,12 +34,12 @@ CONFIG_UNIX_DIAG=y
34CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
35CONFIG_INET=y 35CONFIG_INET=y
36# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
37# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
38# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
39# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
40# CONFIG_BLK_DEV is not set 41# CONFIG_BLK_DEV is not set
41CONFIG_NETDEVICES=y 42CONFIG_NETDEVICES=y
42CONFIG_NET_OSCI_LAN=y
43CONFIG_INPUT_EVDEV=y 43CONFIG_INPUT_EVDEV=y
44# CONFIG_MOUSE_PS2_ALPS is not set 44# CONFIG_MOUSE_PS2_ALPS is not set
45# CONFIG_MOUSE_PS2_LOGIPS2PP is not set 45# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
@@ -58,7 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_HW_RANDOM is not set 58# CONFIG_HW_RANDOM is not set
59# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
60CONFIG_FB=y 60CONFIG_FB=y
61# CONFIG_VGA_CONSOLE is not set
62CONFIG_FRAMEBUFFER_CONSOLE=y 61CONFIG_FRAMEBUFFER_CONSOLE=y
63CONFIG_LOGO=y 62CONFIG_LOGO=y
64# CONFIG_HID is not set 63# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index b01b659168ea..4b6da90f6f26 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_NO_HZ=y 6CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
7CONFIG_IKCONFIG=y 8CONFIG_IKCONFIG=y
@@ -18,15 +19,11 @@ CONFIG_MODULES=y
18# CONFIG_IOSCHED_DEADLINE is not set 19# CONFIG_IOSCHED_DEADLINE is not set
19# CONFIG_IOSCHED_CFQ is not set 20# CONFIG_IOSCHED_CFQ is not set
20CONFIG_ARC_PLAT_SIM=y 21CONFIG_ARC_PLAT_SIM=y
21CONFIG_ARC_BOARD_ML509=y
22CONFIG_ISA_ARCV2=y 22CONFIG_ISA_ARCV2=y
23CONFIG_SMP=y 23CONFIG_SMP=y
24CONFIG_ARC_HAS_LL64=y
25# CONFIG_ARC_HAS_RTSC is not set
26CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu" 24CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
27CONFIG_PREEMPT=y 25CONFIG_PREEMPT=y
28# CONFIG_COMPACTION is not set 26# CONFIG_COMPACTION is not set
29# CONFIG_CROSS_MEMORY_ATTACH is not set
30CONFIG_NET=y 27CONFIG_NET=y
31CONFIG_PACKET=y 28CONFIG_PACKET=y
32CONFIG_PACKET_DIAG=y 29CONFIG_PACKET_DIAG=y
@@ -40,6 +37,7 @@ CONFIG_INET=y
40# CONFIG_INET_LRO is not set 37# CONFIG_INET_LRO is not set
41# CONFIG_IPV6 is not set 38# CONFIG_IPV6 is not set
42# CONFIG_WIRELESS is not set 39# CONFIG_WIRELESS is not set
40CONFIG_DEVTMPFS=y
43# CONFIG_STANDALONE is not set 41# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45# CONFIG_FIRMWARE_IN_KERNEL is not set 43# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -56,14 +54,11 @@ CONFIG_NETDEVICES=y
56# CONFIG_NET_VENDOR_STMICRO is not set 54# CONFIG_NET_VENDOR_STMICRO is not set
57# CONFIG_NET_VENDOR_VIA is not set 55# CONFIG_NET_VENDOR_VIA is not set
58# CONFIG_NET_VENDOR_WIZNET is not set 56# CONFIG_NET_VENDOR_WIZNET is not set
59CONFIG_NET_OSCI_LAN=y
60# CONFIG_WLAN is not set 57# CONFIG_WLAN is not set
61CONFIG_INPUT_EVDEV=y 58CONFIG_INPUT_EVDEV=y
62CONFIG_MOUSE_PS2_TOUCHKIT=y 59CONFIG_MOUSE_PS2_TOUCHKIT=y
63# CONFIG_SERIO_SERPORT is not set 60# CONFIG_SERIO_SERPORT is not set
64CONFIG_SERIO_LIBPS2=y
65CONFIG_SERIO_ARC_PS2=y 61CONFIG_SERIO_ARC_PS2=y
66CONFIG_VT_HW_CONSOLE_BINDING=y
67# CONFIG_LEGACY_PTYS is not set 62# CONFIG_LEGACY_PTYS is not set
68# CONFIG_DEVKMEM is not set 63# CONFIG_DEVKMEM is not set
69CONFIG_SERIAL_8250=y 64CONFIG_SERIAL_8250=y
@@ -75,9 +70,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
75# CONFIG_HW_RANDOM is not set 70# CONFIG_HW_RANDOM is not set
76# CONFIG_HWMON is not set 71# CONFIG_HWMON is not set
77CONFIG_FB=y 72CONFIG_FB=y
78CONFIG_ARCPGU_RGB888=y
79CONFIG_ARCPGU_DISPTYPE=0
80# CONFIG_VGA_CONSOLE is not set
81CONFIG_FRAMEBUFFER_CONSOLE=y 73CONFIG_FRAMEBUFFER_CONSOLE=y
82CONFIG_LOGO=y 74CONFIG_LOGO=y
83# CONFIG_HID is not set 75# CONFIG_HID is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3b4dc9cebcf1..9b342eaf95ae 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -3,6 +3,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
3CONFIG_DEFAULT_HOSTNAME="tb10x" 3CONFIG_DEFAULT_HOSTNAME="tb10x"
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6# CONFIG_CROSS_MEMORY_ATTACH is not set
6CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 9CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -26,12 +27,10 @@ CONFIG_MODULE_UNLOAD=y
26# CONFIG_BLOCK is not set 27# CONFIG_BLOCK is not set
27CONFIG_ARC_PLAT_TB10X=y 28CONFIG_ARC_PLAT_TB10X=y
28CONFIG_ARC_CACHE_LINE_SHIFT=5 29CONFIG_ARC_CACHE_LINE_SHIFT=5
29CONFIG_ARC_STACK_NONEXEC=y
30CONFIG_HZ=250 30CONFIG_HZ=250
31CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" 31CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
32CONFIG_PREEMPT_VOLUNTARY=y 32CONFIG_PREEMPT_VOLUNTARY=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34# CONFIG_CROSS_MEMORY_ATTACH is not set
35CONFIG_NET=y 34CONFIG_NET=y
36CONFIG_PACKET=y 35CONFIG_PACKET=y
37CONFIG_UNIX=y 36CONFIG_UNIX=y
@@ -44,8 +43,8 @@ CONFIG_IP_MULTICAST=y
44# CONFIG_INET_DIAG is not set 43# CONFIG_INET_DIAG is not set
45# CONFIG_IPV6 is not set 44# CONFIG_IPV6 is not set
46# CONFIG_WIRELESS is not set 45# CONFIG_WIRELESS is not set
46CONFIG_DEVTMPFS=y
47# CONFIG_FIRMWARE_IN_KERNEL is not set 47# CONFIG_FIRMWARE_IN_KERNEL is not set
48CONFIG_PROC_DEVICETREE=y
49CONFIG_NETDEVICES=y 48CONFIG_NETDEVICES=y
50# CONFIG_NET_CADENCE is not set 49# CONFIG_NET_CADENCE is not set
51# CONFIG_NET_VENDOR_BROADCOM is not set 50# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -55,9 +54,6 @@ CONFIG_NETDEVICES=y
55# CONFIG_NET_VENDOR_NATSEMI is not set 54# CONFIG_NET_VENDOR_NATSEMI is not set
56# CONFIG_NET_VENDOR_SEEQ is not set 55# CONFIG_NET_VENDOR_SEEQ is not set
57CONFIG_STMMAC_ETH=y 56CONFIG_STMMAC_ETH=y
58CONFIG_STMMAC_DEBUG_FS=y
59CONFIG_STMMAC_DA=y
60CONFIG_STMMAC_CHAINED=y
61# CONFIG_NET_VENDOR_WIZNET is not set 57# CONFIG_NET_VENDOR_WIZNET is not set
62# CONFIG_WLAN is not set 58# CONFIG_WLAN is not set
63# CONFIG_INPUT is not set 59# CONFIG_INPUT is not set
@@ -91,7 +87,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
91CONFIG_LEDS_TRIGGER_TRANSIENT=y 87CONFIG_LEDS_TRIGGER_TRANSIENT=y
92CONFIG_DMADEVICES=y 88CONFIG_DMADEVICES=y
93CONFIG_DW_DMAC=y 89CONFIG_DW_DMAC=y
94CONFIG_NET_DMA=y
95CONFIG_ASYNC_TX_DMA=y 90CONFIG_ASYNC_TX_DMA=y
96# CONFIG_IOMMU_SUPPORT is not set 91# CONFIG_IOMMU_SUPPORT is not set
97# CONFIG_DNOTIFY is not set 92# CONFIG_DNOTIFY is not set
@@ -100,17 +95,16 @@ CONFIG_TMPFS=y
100CONFIG_CONFIGFS_FS=y 95CONFIG_CONFIGFS_FS=y
101# CONFIG_MISC_FILESYSTEMS is not set 96# CONFIG_MISC_FILESYSTEMS is not set
102# CONFIG_NETWORK_FILESYSTEMS is not set 97# CONFIG_NETWORK_FILESYSTEMS is not set
98CONFIG_DEBUG_INFO=y
103# CONFIG_ENABLE_WARN_DEPRECATED is not set 99# CONFIG_ENABLE_WARN_DEPRECATED is not set
104CONFIG_MAGIC_SYSRQ=y
105CONFIG_STRIP_ASM_SYMS=y 100CONFIG_STRIP_ASM_SYMS=y
106CONFIG_DEBUG_FS=y 101CONFIG_DEBUG_FS=y
107CONFIG_HEADERS_CHECK=y 102CONFIG_HEADERS_CHECK=y
108CONFIG_DEBUG_SECTION_MISMATCH=y 103CONFIG_DEBUG_SECTION_MISMATCH=y
104CONFIG_MAGIC_SYSRQ=y
105CONFIG_DEBUG_MEMORY_INIT=y
106CONFIG_DEBUG_STACKOVERFLOW=y
109CONFIG_DETECT_HUNG_TASK=y 107CONFIG_DETECT_HUNG_TASK=y
110CONFIG_SCHEDSTATS=y 108CONFIG_SCHEDSTATS=y
111CONFIG_TIMER_STATS=y 109CONFIG_TIMER_STATS=y
112CONFIG_DEBUG_INFO=y
113CONFIG_DEBUG_MEMORY_INIT=y
114CONFIG_DEBUG_STACKOVERFLOW=y
115# CONFIG_CRYPTO_ANSI_CPRNG is not set
116# CONFIG_CRYPTO_HW is not set 110# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index fdc5be5b1029..f9f4c6f59fdb 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -10,7 +10,8 @@
10#define _ASM_ARC_ARCREGS_H 10#define _ASM_ARC_ARCREGS_H
11 11
12/* Build Configuration Registers */ 12/* Build Configuration Registers */
13#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */ 13#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
14#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
14#define ARC_REG_CRC_BCR 0x62 15#define ARC_REG_CRC_BCR 0x62
15#define ARC_REG_VECBASE_BCR 0x68 16#define ARC_REG_VECBASE_BCR 0x68
16#define ARC_REG_PERIBASE_BCR 0x69 17#define ARC_REG_PERIBASE_BCR 0x69
@@ -18,10 +19,10 @@
18#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */ 19#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
19#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */ 20#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
20#define ARC_REG_SLC_BCR 0xce 21#define ARC_REG_SLC_BCR 0xce
21#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ 22#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
22#define ARC_REG_TIMERS_BCR 0x75 23#define ARC_REG_TIMERS_BCR 0x75
23#define ARC_REG_AP_BCR 0x76 24#define ARC_REG_AP_BCR 0x76
24#define ARC_REG_ICCM_BCR 0x78 25#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
25#define ARC_REG_XY_MEM_BCR 0x79 26#define ARC_REG_XY_MEM_BCR 0x79
26#define ARC_REG_MAC_BCR 0x7a 27#define ARC_REG_MAC_BCR 0x7a
27#define ARC_REG_MUL_BCR 0x7b 28#define ARC_REG_MUL_BCR 0x7b
@@ -36,6 +37,7 @@
36#define ARC_REG_IRQ_BCR 0xF3 37#define ARC_REG_IRQ_BCR 0xF3
37#define ARC_REG_SMART_BCR 0xFF 38#define ARC_REG_SMART_BCR 0xFF
38#define ARC_REG_CLUSTER_BCR 0xcf 39#define ARC_REG_CLUSTER_BCR 0xcf
40#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
39 41
40/* status32 Bits Positions */ 42/* status32 Bits Positions */
41#define STATUS_AE_BIT 5 /* Exception active */ 43#define STATUS_AE_BIT 5 /* Exception active */
@@ -246,7 +248,7 @@ struct bcr_perip {
246#endif 248#endif
247}; 249};
248 250
249struct bcr_iccm { 251struct bcr_iccm_arcompact {
250#ifdef CONFIG_CPU_BIG_ENDIAN 252#ifdef CONFIG_CPU_BIG_ENDIAN
251 unsigned int base:16, pad:5, sz:3, ver:8; 253 unsigned int base:16, pad:5, sz:3, ver:8;
252#else 254#else
@@ -254,17 +256,15 @@ struct bcr_iccm {
254#endif 256#endif
255}; 257};
256 258
257/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */ 259struct bcr_iccm_arcv2 {
258struct bcr_dccm_base {
259#ifdef CONFIG_CPU_BIG_ENDIAN 260#ifdef CONFIG_CPU_BIG_ENDIAN
260 unsigned int addr:24, ver:8; 261 unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
261#else 262#else
262 unsigned int ver:8, addr:24; 263 unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
263#endif 264#endif
264}; 265};
265 266
266/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */ 267struct bcr_dccm_arcompact {
267struct bcr_dccm {
268#ifdef CONFIG_CPU_BIG_ENDIAN 268#ifdef CONFIG_CPU_BIG_ENDIAN
269 unsigned int res:21, sz:3, ver:8; 269 unsigned int res:21, sz:3, ver:8;
270#else 270#else
@@ -272,6 +272,14 @@ struct bcr_dccm {
272#endif 272#endif
273}; 273};
274 274
275struct bcr_dccm_arcv2 {
276#ifdef CONFIG_CPU_BIG_ENDIAN
277 unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
278#else
279 unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
280#endif
281};
282
275/* ARCompact: Both SP and DP FPU BCRs have same format */ 283/* ARCompact: Both SP and DP FPU BCRs have same format */
276struct bcr_fp_arcompact { 284struct bcr_fp_arcompact {
277#ifdef CONFIG_CPU_BIG_ENDIAN 285#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -315,9 +323,9 @@ struct bcr_bpu_arcv2 {
315 323
316struct bcr_generic { 324struct bcr_generic {
317#ifdef CONFIG_CPU_BIG_ENDIAN 325#ifdef CONFIG_CPU_BIG_ENDIAN
318 unsigned int pad:24, ver:8; 326 unsigned int info:24, ver:8;
319#else 327#else
320 unsigned int ver:8, pad:24; 328 unsigned int ver:8, info:24;
321#endif 329#endif
322}; 330};
323 331
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4fd7d62a6e30..49014f0ef36d 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,11 +16,9 @@
16#ifdef CONFIG_ISA_ARCOMPACT 16#ifdef CONFIG_ISA_ARCOMPACT
17#define TIMER0_IRQ 3 17#define TIMER0_IRQ 3
18#define TIMER1_IRQ 4 18#define TIMER1_IRQ 4
19#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
20#else 19#else
21#define TIMER0_IRQ 16 20#define TIMER0_IRQ 16
22#define TIMER1_IRQ 17 21#define TIMER1_IRQ 17
23#define IPI_IRQ 19
24#endif 22#endif
25 23
26#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 1fc18ee06cf2..37c2f751eebf 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
22#define AUX_IRQ_CTRL 0x00E 22#define AUX_IRQ_CTRL 0x00E
23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ 23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
24#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */ 24#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
25#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
25#define AUX_IRQ_PRIORITY 0x206 26#define AUX_IRQ_PRIORITY 0x206
26#define ICAUSE 0x40a 27#define ICAUSE 0x40a
27#define AUX_IRQ_SELECT 0x40b 28#define AUX_IRQ_SELECT 0x40b
@@ -115,6 +116,16 @@ static inline int arch_irqs_disabled(void)
115 return arch_irqs_disabled_flags(arch_local_save_flags()); 116 return arch_irqs_disabled_flags(arch_local_save_flags());
116} 117}
117 118
119static inline void arc_softirq_trigger(int irq)
120{
121 write_aux_reg(AUX_IRQ_HINT, irq);
122}
123
124static inline void arc_softirq_clear(int irq)
125{
126 write_aux_reg(AUX_IRQ_HINT, 0);
127}
128
118#else 129#else
119 130
120.macro IRQ_DISABLE scratch 131.macro IRQ_DISABLE scratch
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index b17830294706..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
45VECTOR handle_interrupt ; (16) Timer0 45VECTOR handle_interrupt ; (16) Timer0
46VECTOR handle_interrupt ; unused (Timer1) 46VECTOR handle_interrupt ; unused (Timer1)
47VECTOR handle_interrupt ; unused (WDT) 47VECTOR handle_interrupt ; unused (WDT)
48VECTOR handle_interrupt ; (19) ICI (inter core interrupt) 48VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
49VECTOR handle_interrupt 49VECTOR handle_interrupt ; (20) perf Interrupt
50VECTOR handle_interrupt 50VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
51VECTOR handle_interrupt 51VECTOR handle_interrupt ; unused
52VECTOR handle_interrupt ; (23) End of fixed IRQs 52VECTOR handle_interrupt ; (23) unused
53# End of fixed IRQs
53 54
54.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 55.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
55 VECTOR handle_interrupt 56 VECTOR handle_interrupt
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 06bcedf19b62..224d1c3aa9c4 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
81{ 81{
82 switch (irq) { 82 switch (irq) {
83 case TIMER0_IRQ: 83 case TIMER0_IRQ:
84#ifdef CONFIG_SMP
85 case IPI_IRQ:
86#endif
87 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); 84 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
88 break; 85 break;
89 default: 86 default:
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bc771f58fefb..c41c364b926c 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,13 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <asm/irqflags-arcv2.h>
14#include <asm/mcip.h> 15#include <asm/mcip.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16 17
18#define IPI_IRQ 19
19#define SOFTIRQ_IRQ 21
20
17static char smp_cpuinfo_buf[128]; 21static char smp_cpuinfo_buf[128];
18static int idu_detected; 22static int idu_detected;
19 23
@@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
22static void mcip_setup_per_cpu(int cpu) 26static void mcip_setup_per_cpu(int cpu)
23{ 27{
24 smp_ipi_irq_setup(cpu, IPI_IRQ); 28 smp_ipi_irq_setup(cpu, IPI_IRQ);
29 smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
25} 30}
26 31
27static void mcip_ipi_send(int cpu) 32static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu)
29 unsigned long flags; 34 unsigned long flags;
30 int ipi_was_pending; 35 int ipi_was_pending;
31 36
37 /* ARConnect can only send IPI to others */
38 if (unlikely(cpu == raw_smp_processor_id())) {
39 arc_softirq_trigger(SOFTIRQ_IRQ);
40 return;
41 }
42
43 raw_spin_lock_irqsave(&mcip_lock, flags);
44
32 /* 45 /*
33 * NOTE: We must spin here if the other cpu hasn't yet 46 * If receiver already has a pending interrupt, elide sending this one.
34 * serviced a previous message. This can burn lots 47 * Linux cross core calling works well with concurrent IPIs
35 * of time, but we MUST follows this protocol or 48 * coalesced into one
36 * ipi messages can be lost!!! 49 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
37 * Also, we must release the lock in this loop because
38 * the other side may get to this same loop and not
39 * be able to ack -- thus causing deadlock.
40 */ 50 */
51 __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
52 ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
53 if (!ipi_was_pending)
54 __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
41 55
42 do {
43 raw_spin_lock_irqsave(&mcip_lock, flags);
44 __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
45 ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
46 if (ipi_was_pending == 0)
47 break; /* break out but keep lock */
48 raw_spin_unlock_irqrestore(&mcip_lock, flags);
49 } while (1);
50
51 __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
52 raw_spin_unlock_irqrestore(&mcip_lock, flags); 56 raw_spin_unlock_irqrestore(&mcip_lock, flags);
53
54#ifdef CONFIG_ARC_IPI_DBG
55 if (ipi_was_pending)
56 pr_info("IPI ACK delayed from cpu %d\n", cpu);
57#endif
58} 57}
59 58
60static void mcip_ipi_clear(int irq) 59static void mcip_ipi_clear(int irq)
61{ 60{
62 unsigned int cpu, c; 61 unsigned int cpu, c;
63 unsigned long flags; 62 unsigned long flags;
64 unsigned int __maybe_unused copy; 63
64 if (unlikely(irq == SOFTIRQ_IRQ)) {
65 arc_softirq_clear(irq);
66 return;
67 }
65 68
66 raw_spin_lock_irqsave(&mcip_lock, flags); 69 raw_spin_lock_irqsave(&mcip_lock, flags);
67 70
68 /* Who sent the IPI */ 71 /* Who sent the IPI */
69 __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0); 72 __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
70 73
71 copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */ 74 cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
72 75
73 /* 76 /*
74 * In rare case, multiple concurrent IPIs sent to same target can 77 * In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq)
82 } while (cpu); 85 } while (cpu);
83 86
84 raw_spin_unlock_irqrestore(&mcip_lock, flags); 87 raw_spin_unlock_irqrestore(&mcip_lock, flags);
85
86#ifdef CONFIG_ARC_IPI_DBG
87 if (c != __ffs(copy))
88 pr_info("IPIs from %x coalesced to %x\n",
89 copy, raw_smp_processor_id());
90#endif
91} 88}
92 89
93static void mcip_probe_n_setup(void) 90static void mcip_probe_n_setup(void)
@@ -111,10 +108,11 @@ static void mcip_probe_n_setup(void)
111 READ_BCR(ARC_REG_MCIP_BCR, mp); 108 READ_BCR(ARC_REG_MCIP_BCR, mp);
112 109
113 sprintf(smp_cpuinfo_buf, 110 sprintf(smp_cpuinfo_buf,
114 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", 111 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
115 mp.ver, mp.num_cores, 112 mp.ver, mp.num_cores,
116 IS_AVAIL1(mp.ipi, "IPI "), 113 IS_AVAIL1(mp.ipi, "IPI "),
117 IS_AVAIL1(mp.idu, "IDU "), 114 IS_AVAIL1(mp.idu, "IDU "),
115 IS_AVAIL1(mp.llm, "LLM "),
118 IS_AVAIL1(mp.dbg, "DEBUG "), 116 IS_AVAIL1(mp.dbg, "DEBUG "),
119 IS_AVAIL1(mp.gfrc, "GFRC")); 117 IS_AVAIL1(mp.gfrc, "GFRC"));
120 118
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a7edceba5f84..cdc821df1809 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -42,6 +42,53 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
42 42
43struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 43struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
44 44
45static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
46{
47 if (is_isa_arcompact()) {
48 struct bcr_iccm_arcompact iccm;
49 struct bcr_dccm_arcompact dccm;
50
51 READ_BCR(ARC_REG_ICCM_BUILD, iccm);
52 if (iccm.ver) {
53 cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
54 cpu->iccm.base_addr = iccm.base << 16;
55 }
56
57 READ_BCR(ARC_REG_DCCM_BUILD, dccm);
58 if (dccm.ver) {
59 unsigned long base;
60 cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
61
62 base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
63 cpu->dccm.base_addr = base & ~0xF;
64 }
65 } else {
66 struct bcr_iccm_arcv2 iccm;
67 struct bcr_dccm_arcv2 dccm;
68 unsigned long region;
69
70 READ_BCR(ARC_REG_ICCM_BUILD, iccm);
71 if (iccm.ver) {
72 cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
73 if (iccm.sz00 == 0xF && iccm.sz01 > 0)
74 cpu->iccm.sz <<= iccm.sz01;
75
76 region = read_aux_reg(ARC_REG_AUX_ICCM);
77 cpu->iccm.base_addr = region & 0xF0000000;
78 }
79
80 READ_BCR(ARC_REG_DCCM_BUILD, dccm);
81 if (dccm.ver) {
82 cpu->dccm.sz = 256 << dccm.sz0;
83 if (dccm.sz0 == 0xF && dccm.sz1 > 0)
84 cpu->dccm.sz <<= dccm.sz1;
85
86 region = read_aux_reg(ARC_REG_AUX_DCCM);
87 cpu->dccm.base_addr = region & 0xF0000000;
88 }
89 }
90}
91
45static void read_arc_build_cfg_regs(void) 92static void read_arc_build_cfg_regs(void)
46{ 93{
47 struct bcr_perip uncached_space; 94 struct bcr_perip uncached_space;
@@ -76,36 +123,11 @@ static void read_arc_build_cfg_regs(void)
76 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ 123 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
77 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; 124 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
78 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ 125 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
79
80 /* Note that we read the CCM BCRs independent of kernel config
81 * This is to catch the cases where user doesn't know that
82 * CCMs are present in hardware build
83 */
84 {
85 struct bcr_iccm iccm;
86 struct bcr_dccm dccm;
87 struct bcr_dccm_base dccm_base;
88 unsigned int bcr_32bit_val;
89
90 bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
91 if (bcr_32bit_val) {
92 iccm = *((struct bcr_iccm *)&bcr_32bit_val);
93 cpu->iccm.base_addr = iccm.base << 16;
94 cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
95 }
96
97 bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
98 if (bcr_32bit_val) {
99 dccm = *((struct bcr_dccm *)&bcr_32bit_val);
100 cpu->dccm.sz = 0x800 << (dccm.sz);
101
102 READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
103 cpu->dccm.base_addr = dccm_base.addr << 8;
104 }
105 }
106
107 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); 126 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
108 127
128 /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
129 read_decode_ccm_bcr(cpu);
130
109 read_decode_mmu_bcr(); 131 read_decode_mmu_bcr();
110 read_decode_cache_bcr(); 132 read_decode_cache_bcr();
111 133
@@ -237,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
237 259
238 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); 260 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
239 } 261 }
240 n += scnprintf(buf + n, len - n, "%s",
241 IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
242 } 262 }
243 263
244 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", 264 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ef6e9e15b82a..424e937da5c8 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id)
336 int rc; 336 int rc;
337 337
338 rc = __do_IPI(msg); 338 rc = __do_IPI(msg);
339#ifdef CONFIG_ARC_IPI_DBG
340 /* IPI received but no valid @msg */
341 if (rc) 339 if (rc)
342 pr_info("IPI with bogus msg %ld in %ld\n", msg, copy); 340 pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
343#endif
344 pending &= ~(1U << msg); 341 pending &= ~(1U << msg);
345 } while (pending); 342 } while (pending);
346 343
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 7a6a58ef8aaf..43788b1a1ac5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic=
195$(obj)/font.c: $(FONTC) 195$(obj)/font.c: $(FONTC)
196 $(call cmd,shipped) 196 $(call cmd,shipped)
197 197
198AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
199
198$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S 200$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
199 $(call cmd,shipped) 201 $(call cmd,shipped)
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f3db13d2d90e..0cc150b87b86 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -285,8 +285,10 @@
285 }; 285 };
286}; 286};
287 287
288
289/include/ "tps65217.dtsi"
290
288&tps { 291&tps {
289 compatible = "ti,tps65217";
290 /* 292 /*
291 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only 293 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
292 * mode") at poweroff. Most BeagleBone versions do not support RTC-only 294 * mode") at poweroff. Most BeagleBone versions do not support RTC-only
@@ -307,17 +309,12 @@
307 ti,pmic-shutdown-controller; 309 ti,pmic-shutdown-controller;
308 310
309 regulators { 311 regulators {
310 #address-cells = <1>;
311 #size-cells = <0>;
312
313 dcdc1_reg: regulator@0 { 312 dcdc1_reg: regulator@0 {
314 reg = <0>;
315 regulator-name = "vdds_dpr"; 313 regulator-name = "vdds_dpr";
316 regulator-always-on; 314 regulator-always-on;
317 }; 315 };
318 316
319 dcdc2_reg: regulator@1 { 317 dcdc2_reg: regulator@1 {
320 reg = <1>;
321 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 318 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
322 regulator-name = "vdd_mpu"; 319 regulator-name = "vdd_mpu";
323 regulator-min-microvolt = <925000>; 320 regulator-min-microvolt = <925000>;
@@ -327,7 +324,6 @@
327 }; 324 };
328 325
329 dcdc3_reg: regulator@2 { 326 dcdc3_reg: regulator@2 {
330 reg = <2>;
331 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 327 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
332 regulator-name = "vdd_core"; 328 regulator-name = "vdd_core";
333 regulator-min-microvolt = <925000>; 329 regulator-min-microvolt = <925000>;
@@ -337,25 +333,21 @@
337 }; 333 };
338 334
339 ldo1_reg: regulator@3 { 335 ldo1_reg: regulator@3 {
340 reg = <3>;
341 regulator-name = "vio,vrtc,vdds"; 336 regulator-name = "vio,vrtc,vdds";
342 regulator-always-on; 337 regulator-always-on;
343 }; 338 };
344 339
345 ldo2_reg: regulator@4 { 340 ldo2_reg: regulator@4 {
346 reg = <4>;
347 regulator-name = "vdd_3v3aux"; 341 regulator-name = "vdd_3v3aux";
348 regulator-always-on; 342 regulator-always-on;
349 }; 343 };
350 344
351 ldo3_reg: regulator@5 { 345 ldo3_reg: regulator@5 {
352 reg = <5>;
353 regulator-name = "vdd_1v8"; 346 regulator-name = "vdd_1v8";
354 regulator-always-on; 347 regulator-always-on;
355 }; 348 };
356 349
357 ldo4_reg: regulator@6 { 350 ldo4_reg: regulator@6 {
358 reg = <6>;
359 regulator-name = "vdd_3v3a"; 351 regulator-name = "vdd_3v3a";
360 regulator-always-on; 352 regulator-always-on;
361 }; 353 };
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index fda457b07e15..857d9894103a 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -128,21 +128,16 @@
128 128
129}; 129};
130 130
131&tps { 131/include/ "tps65217.dtsi"
132 compatible = "ti,tps65217";
133 132
133&tps {
134 regulators { 134 regulators {
135 #address-cells = <1>;
136 #size-cells = <0>;
137
138 dcdc1_reg: regulator@0 { 135 dcdc1_reg: regulator@0 {
139 reg = <0>;
140 regulator-name = "vdds_dpr"; 136 regulator-name = "vdds_dpr";
141 regulator-always-on; 137 regulator-always-on;
142 }; 138 };
143 139
144 dcdc2_reg: regulator@1 { 140 dcdc2_reg: regulator@1 {
145 reg = <1>;
146 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 141 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
147 regulator-name = "vdd_mpu"; 142 regulator-name = "vdd_mpu";
148 regulator-min-microvolt = <925000>; 143 regulator-min-microvolt = <925000>;
@@ -152,7 +147,6 @@
152 }; 147 };
153 148
154 dcdc3_reg: regulator@2 { 149 dcdc3_reg: regulator@2 {
155 reg = <2>;
156 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 150 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
157 regulator-name = "vdd_core"; 151 regulator-name = "vdd_core";
158 regulator-min-microvolt = <925000>; 152 regulator-min-microvolt = <925000>;
@@ -162,28 +156,24 @@
162 }; 156 };
163 157
164 ldo1_reg: regulator@3 { 158 ldo1_reg: regulator@3 {
165 reg = <3>;
166 regulator-name = "vio,vrtc,vdds"; 159 regulator-name = "vio,vrtc,vdds";
167 regulator-boot-on; 160 regulator-boot-on;
168 regulator-always-on; 161 regulator-always-on;
169 }; 162 };
170 163
171 ldo2_reg: regulator@4 { 164 ldo2_reg: regulator@4 {
172 reg = <4>;
173 regulator-name = "vdd_3v3aux"; 165 regulator-name = "vdd_3v3aux";
174 regulator-boot-on; 166 regulator-boot-on;
175 regulator-always-on; 167 regulator-always-on;
176 }; 168 };
177 169
178 ldo3_reg: regulator@5 { 170 ldo3_reg: regulator@5 {
179 reg = <5>;
180 regulator-name = "vdd_1v8"; 171 regulator-name = "vdd_1v8";
181 regulator-boot-on; 172 regulator-boot-on;
182 regulator-always-on; 173 regulator-always-on;
183 }; 174 };
184 175
185 ldo4_reg: regulator@6 { 176 ldo4_reg: regulator@6 {
186 reg = <6>;
187 regulator-name = "vdd_3v3d"; 177 regulator-name = "vdd_3v3d";
188 regulator-boot-on; 178 regulator-boot-on;
189 regulator-always-on; 179 regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 77559a1ded60..f313999c503e 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -375,15 +375,11 @@
375 wp-gpios = <&gpio3 18 0>; 375 wp-gpios = <&gpio3 18 0>;
376}; 376};
377 377
378&tps { 378#include "tps65217.dtsi"
379 compatible = "ti,tps65217";
380 379
380&tps {
381 regulators { 381 regulators {
382 #address-cells = <1>;
383 #size-cells = <0>;
384
385 dcdc1_reg: regulator@0 { 382 dcdc1_reg: regulator@0 {
386 reg = <0>;
387 /* +1.5V voltage with ±4% tolerance */ 383 /* +1.5V voltage with ±4% tolerance */
388 regulator-min-microvolt = <1450000>; 384 regulator-min-microvolt = <1450000>;
389 regulator-max-microvolt = <1550000>; 385 regulator-max-microvolt = <1550000>;
@@ -392,7 +388,6 @@
392 }; 388 };
393 389
394 dcdc2_reg: regulator@1 { 390 dcdc2_reg: regulator@1 {
395 reg = <1>;
396 /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */ 391 /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
397 regulator-name = "vdd_mpu"; 392 regulator-name = "vdd_mpu";
398 regulator-min-microvolt = <915000>; 393 regulator-min-microvolt = <915000>;
@@ -402,7 +397,6 @@
402 }; 397 };
403 398
404 dcdc3_reg: regulator@2 { 399 dcdc3_reg: regulator@2 {
405 reg = <2>;
406 /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */ 400 /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
407 regulator-name = "vdd_core"; 401 regulator-name = "vdd_core";
408 regulator-min-microvolt = <915000>; 402 regulator-min-microvolt = <915000>;
@@ -412,7 +406,6 @@
412 }; 406 };
413 407
414 ldo1_reg: regulator@3 { 408 ldo1_reg: regulator@3 {
415 reg = <3>;
416 /* +1.8V voltage with ±4% tolerance */ 409 /* +1.8V voltage with ±4% tolerance */
417 regulator-min-microvolt = <1750000>; 410 regulator-min-microvolt = <1750000>;
418 regulator-max-microvolt = <1870000>; 411 regulator-max-microvolt = <1870000>;
@@ -421,7 +414,6 @@
421 }; 414 };
422 415
423 ldo2_reg: regulator@4 { 416 ldo2_reg: regulator@4 {
424 reg = <4>;
425 /* +3.3V voltage with ±4% tolerance */ 417 /* +3.3V voltage with ±4% tolerance */
426 regulator-min-microvolt = <3175000>; 418 regulator-min-microvolt = <3175000>;
427 regulator-max-microvolt = <3430000>; 419 regulator-max-microvolt = <3430000>;
@@ -430,7 +422,6 @@
430 }; 422 };
431 423
432 ldo3_reg: regulator@5 { 424 ldo3_reg: regulator@5 {
433 reg = <5>;
434 /* +1.8V voltage with ±4% tolerance */ 425 /* +1.8V voltage with ±4% tolerance */
435 regulator-min-microvolt = <1750000>; 426 regulator-min-microvolt = <1750000>;
436 regulator-max-microvolt = <1870000>; 427 regulator-max-microvolt = <1870000>;
@@ -439,7 +430,6 @@
439 }; 430 };
440 431
441 ldo4_reg: regulator@6 { 432 ldo4_reg: regulator@6 {
442 reg = <6>;
443 /* +3.3V voltage with ±4% tolerance */ 433 /* +3.3V voltage with ±4% tolerance */
444 regulator-min-microvolt = <3175000>; 434 regulator-min-microvolt = <3175000>;
445 regulator-max-microvolt = <3430000>; 435 regulator-max-microvolt = <3430000>;
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 471a3a70ea1f..8867aaaec54d 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -420,9 +420,9 @@
420 vin-supply = <&vbat>; 420 vin-supply = <&vbat>;
421}; 421};
422 422
423&tps { 423/include/ "tps65217.dtsi"
424 compatible = "ti,tps65217";
425 424
425&tps {
426 backlight { 426 backlight {
427 isel = <1>; /* ISET1 */ 427 isel = <1>; /* ISET1 */
428 fdim = <200>; /* TPS65217_BL_FDIM_200HZ */ 428 fdim = <200>; /* TPS65217_BL_FDIM_200HZ */
@@ -430,17 +430,12 @@
430 }; 430 };
431 431
432 regulators { 432 regulators {
433 #address-cells = <1>;
434 #size-cells = <0>;
435
436 dcdc1_reg: regulator@0 { 433 dcdc1_reg: regulator@0 {
437 reg = <0>;
438 /* VDD_1V8 system supply */ 434 /* VDD_1V8 system supply */
439 regulator-always-on; 435 regulator-always-on;
440 }; 436 };
441 437
442 dcdc2_reg: regulator@1 { 438 dcdc2_reg: regulator@1 {
443 reg = <1>;
444 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ 439 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
445 regulator-name = "vdd_core"; 440 regulator-name = "vdd_core";
446 regulator-min-microvolt = <925000>; 441 regulator-min-microvolt = <925000>;
@@ -450,7 +445,6 @@
450 }; 445 };
451 446
452 dcdc3_reg: regulator@2 { 447 dcdc3_reg: regulator@2 {
453 reg = <2>;
454 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ 448 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
455 regulator-name = "vdd_mpu"; 449 regulator-name = "vdd_mpu";
456 regulator-min-microvolt = <925000>; 450 regulator-min-microvolt = <925000>;
@@ -460,21 +454,18 @@
460 }; 454 };
461 455
462 ldo1_reg: regulator@3 { 456 ldo1_reg: regulator@3 {
463 reg = <3>;
464 /* VRTC 1.8V always-on supply */ 457 /* VRTC 1.8V always-on supply */
465 regulator-name = "vrtc,vdds"; 458 regulator-name = "vrtc,vdds";
466 regulator-always-on; 459 regulator-always-on;
467 }; 460 };
468 461
469 ldo2_reg: regulator@4 { 462 ldo2_reg: regulator@4 {
470 reg = <4>;
471 /* 3.3V rail */ 463 /* 3.3V rail */
472 regulator-name = "vdd_3v3aux"; 464 regulator-name = "vdd_3v3aux";
473 regulator-always-on; 465 regulator-always-on;
474 }; 466 };
475 467
476 ldo3_reg: regulator@5 { 468 ldo3_reg: regulator@5 {
477 reg = <5>;
478 /* VDD_3V3A 3.3V rail */ 469 /* VDD_3V3A 3.3V rail */
479 regulator-name = "vdd_3v3a"; 470 regulator-name = "vdd_3v3a";
480 regulator-min-microvolt = <3300000>; 471 regulator-min-microvolt = <3300000>;
@@ -482,7 +473,6 @@
482 }; 473 };
483 474
484 ldo4_reg: regulator@6 { 475 ldo4_reg: regulator@6 {
485 reg = <6>;
486 /* VDD_3V3B 3.3V rail */ 476 /* VDD_3V3B 3.3V rail */
487 regulator-name = "vdd_3v3b"; 477 regulator-name = "vdd_3v3b";
488 regulator-always-on; 478 regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 1b5b044fcd91..865de8500f1c 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -46,7 +46,7 @@
46 gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; 46 gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
47 linux,code = <KEY_BACK>; 47 linux,code = <KEY_BACK>;
48 debounce-interval = <1000>; 48 debounce-interval = <1000>;
49 gpio-key,wakeup; 49 wakeup-source;
50 }; 50 };
51 51
52 front_button { 52 front_button {
@@ -54,7 +54,7 @@
54 gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>; 54 gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
55 linux,code = <KEY_FRONT>; 55 linux,code = <KEY_FRONT>;
56 debounce-interval = <1000>; 56 debounce-interval = <1000>;
57 gpio-key,wakeup; 57 wakeup-source;
58 }; 58 };
59 }; 59 };
60 60
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index d38edfa53bb9..3303c281697b 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -375,19 +375,16 @@
375 pinctrl-0 = <&uart4_pins>; 375 pinctrl-0 = <&uart4_pins>;
376}; 376};
377 377
378#include "tps65217.dtsi"
379
378&tps { 380&tps {
379 compatible = "ti,tps65217";
380 ti,pmic-shutdown-controller; 381 ti,pmic-shutdown-controller;
381 382
382 interrupt-parent = <&intc>; 383 interrupt-parent = <&intc>;
383 interrupts = <7>; /* NNMI */ 384 interrupts = <7>; /* NNMI */
384 385
385 regulators { 386 regulators {
386 #address-cells = <1>;
387 #size-cells = <0>;
388
389 dcdc1_reg: regulator@0 { 387 dcdc1_reg: regulator@0 {
390 reg = <0>;
391 /* VDDS_DDR */ 388 /* VDDS_DDR */
392 regulator-min-microvolt = <1500000>; 389 regulator-min-microvolt = <1500000>;
393 regulator-max-microvolt = <1500000>; 390 regulator-max-microvolt = <1500000>;
@@ -395,7 +392,6 @@
395 }; 392 };
396 393
397 dcdc2_reg: regulator@1 { 394 dcdc2_reg: regulator@1 {
398 reg = <1>;
399 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 395 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
400 regulator-name = "vdd_mpu"; 396 regulator-name = "vdd_mpu";
401 regulator-min-microvolt = <925000>; 397 regulator-min-microvolt = <925000>;
@@ -405,7 +401,6 @@
405 }; 401 };
406 402
407 dcdc3_reg: regulator@2 { 403 dcdc3_reg: regulator@2 {
408 reg = <2>;
409 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 404 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
410 regulator-name = "vdd_core"; 405 regulator-name = "vdd_core";
411 regulator-min-microvolt = <925000>; 406 regulator-min-microvolt = <925000>;
@@ -415,7 +410,6 @@
415 }; 410 };
416 411
417 ldo1_reg: regulator@3 { 412 ldo1_reg: regulator@3 {
418 reg = <3>;
419 /* VRTC / VIO / VDDS*/ 413 /* VRTC / VIO / VDDS*/
420 regulator-always-on; 414 regulator-always-on;
421 regulator-min-microvolt = <1800000>; 415 regulator-min-microvolt = <1800000>;
@@ -423,7 +417,6 @@
423 }; 417 };
424 418
425 ldo2_reg: regulator@4 { 419 ldo2_reg: regulator@4 {
426 reg = <4>;
427 /* VDD_3V3AUX */ 420 /* VDD_3V3AUX */
428 regulator-always-on; 421 regulator-always-on;
429 regulator-min-microvolt = <3300000>; 422 regulator-min-microvolt = <3300000>;
@@ -431,7 +424,6 @@
431 }; 424 };
432 425
433 ldo3_reg: regulator@5 { 426 ldo3_reg: regulator@5 {
434 reg = <5>;
435 /* VDD_1V8 */ 427 /* VDD_1V8 */
436 regulator-min-microvolt = <1800000>; 428 regulator-min-microvolt = <1800000>;
437 regulator-max-microvolt = <1800000>; 429 regulator-max-microvolt = <1800000>;
@@ -439,7 +431,6 @@
439 }; 431 };
440 432
441 ldo4_reg: regulator@6 { 433 ldo4_reg: regulator@6 {
442 reg = <6>;
443 /* VDD_3V3A */ 434 /* VDD_3V3A */
444 regulator-min-microvolt = <3300000>; 435 regulator-min-microvolt = <3300000>;
445 regulator-max-microvolt = <3300000>; 436 regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 36c0fa6c362a..a0986c65be0c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -173,6 +173,8 @@
173 173
174 sound0_master: simple-audio-card,codec { 174 sound0_master: simple-audio-card,codec {
175 sound-dai = <&tlv320aic3104>; 175 sound-dai = <&tlv320aic3104>;
176 assigned-clocks = <&clkoutmux2_clk_mux>;
177 assigned-clock-parents = <&sys_clk2_dclk_div>;
176 clocks = <&clkout2_clk>; 178 clocks = <&clkout2_clk>;
177 }; 179 };
178 }; 180 };
@@ -796,6 +798,8 @@
796 pinctrl-names = "default", "sleep"; 798 pinctrl-names = "default", "sleep";
797 pinctrl-0 = <&mcasp3_pins_default>; 799 pinctrl-0 = <&mcasp3_pins_default>;
798 pinctrl-1 = <&mcasp3_pins_sleep>; 800 pinctrl-1 = <&mcasp3_pins_sleep>;
801 assigned-clocks = <&mcasp3_ahclkx_mux>;
802 assigned-clock-parents = <&sys_clkin2>;
799 status = "okay"; 803 status = "okay";
800 804
801 op-mode = <0>; /* MCASP_IIS_MODE */ 805 op-mode = <0>; /* MCASP_IIS_MODE */
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 8d93882dc8d5..1c06cb76da07 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -545,7 +545,7 @@
545 ti,debounce-tol = /bits/ 16 <10>; 545 ti,debounce-tol = /bits/ 16 <10>;
546 ti,debounce-rep = /bits/ 16 <1>; 546 ti,debounce-rep = /bits/ 16 <1>;
547 547
548 linux,wakeup; 548 wakeup-source;
549 }; 549 };
550}; 550};
551 551
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4f6ae921656f..f74d3db4846d 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -896,7 +896,6 @@
896 #size-cells = <1>; 896 #size-cells = <1>;
897 reg = <0x2100000 0x10000>; 897 reg = <0x2100000 0x10000>;
898 ranges = <0 0x2100000 0x10000>; 898 ranges = <0 0x2100000 0x10000>;
899 interrupt-parent = <&intc>;
900 clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, 899 clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
901 <&clks IMX6QDL_CLK_CAAM_ACLK>, 900 <&clks IMX6QDL_CLK_CAAM_ACLK>,
902 <&clks IMX6QDL_CLK_CAAM_IPG>, 901 <&clks IMX6QDL_CLK_CAAM_IPG>,
diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts
index bf4143c6cb8f..b84af3da8c84 100644
--- a/arch/arm/boot/dts/kirkwood-ds112.dts
+++ b/arch/arm/boot/dts/kirkwood-ds112.dts
@@ -14,7 +14,7 @@
14#include "kirkwood-synology.dtsi" 14#include "kirkwood-synology.dtsi"
15 15
16/ { 16/ {
17 model = "Synology DS111"; 17 model = "Synology DS112";
18 compatible = "synology,ds111", "marvell,kirkwood"; 18 compatible = "synology,ds111", "marvell,kirkwood";
19 19
20 memory { 20 memory {
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
index 420788229e6f..aae8a7aceab7 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -228,6 +228,37 @@
228 }; 228 };
229}; 229};
230 230
231&devbus_bootcs {
232 status = "okay";
233 devbus,keep-config;
234
235 flash@0 {
236 compatible = "jedec-flash";
237 reg = <0 0x40000>;
238 bank-width = <1>;
239
240 partitions {
241 compatible = "fixed-partitions";
242 #address-cells = <1>;
243 #size-cells = <1>;
244
245 header@0 {
246 reg = <0 0x30000>;
247 read-only;
248 };
249
250 uboot@30000 {
251 reg = <0x30000 0xF000>;
252 read-only;
253 };
254
255 uboot_env@3F000 {
256 reg = <0x3F000 0x1000>;
257 };
258 };
259 };
260};
261
231&mdio { 262&mdio {
232 status = "okay"; 263 status = "okay";
233 264
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6713b1ea732b..01d239c3eaaa 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -283,7 +283,6 @@
283 pinctrl-names = "default"; 283 pinctrl-names = "default";
284 284
285 status = "okay"; 285 status = "okay";
286 renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>;
287}; 286};
288 287
289&usbphy { 288&usbphy {
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1afe24629d1f..b0c912feaa2f 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
90#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2) 90#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
91#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1) 91#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
92#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2) 92#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
93#define PIN_PA15 14 93#define PIN_PA15 15
94#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0) 94#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
95#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1) 95#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
96#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1) 96#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
new file mode 100644
index 000000000000..a63272422d76
--- /dev/null
+++ b/arch/arm/boot/dts/tps65217.dtsi
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Integrated Power Management Chip
11 * http://www.ti.com/lit/ds/symlink/tps65217.pdf
12 */
13
14&tps {
15 compatible = "ti,tps65217";
16
17 regulators {
18 #address-cells = <1>;
19 #size-cells = <0>;
20
21 dcdc1_reg: regulator@0 {
22 reg = <0>;
23 regulator-compatible = "dcdc1";
24 };
25
26 dcdc2_reg: regulator@1 {
27 reg = <1>;
28 regulator-compatible = "dcdc2";
29 };
30
31 dcdc3_reg: regulator@2 {
32 reg = <2>;
33 regulator-compatible = "dcdc3";
34 };
35
36 ldo1_reg: regulator@3 {
37 reg = <3>;
38 regulator-compatible = "ldo1";
39 };
40
41 ldo2_reg: regulator@4 {
42 reg = <4>;
43 regulator-compatible = "ldo2";
44 };
45
46 ldo3_reg: regulator@5 {
47 reg = <5>;
48 regulator-compatible = "ldo3";
49 };
50
51 ldo4_reg: regulator@6 {
52 reg = <6>;
53 regulator-compatible = "ldo4";
54 };
55 };
56};
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b445a5d56f43..89a3a3e592d6 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -364,7 +364,7 @@ static struct crypto_alg aes_algs[] = { {
364 .cra_blkcipher = { 364 .cra_blkcipher = {
365 .min_keysize = AES_MIN_KEY_SIZE, 365 .min_keysize = AES_MIN_KEY_SIZE,
366 .max_keysize = AES_MAX_KEY_SIZE, 366 .max_keysize = AES_MAX_KEY_SIZE,
367 .ivsize = AES_BLOCK_SIZE, 367 .ivsize = 0,
368 .setkey = ce_aes_setkey, 368 .setkey = ce_aes_setkey,
369 .encrypt = ecb_encrypt, 369 .encrypt = ecb_encrypt,
370 .decrypt = ecb_decrypt, 370 .decrypt = ecb_decrypt,
@@ -441,7 +441,7 @@ static struct crypto_alg aes_algs[] = { {
441 .cra_ablkcipher = { 441 .cra_ablkcipher = {
442 .min_keysize = AES_MIN_KEY_SIZE, 442 .min_keysize = AES_MIN_KEY_SIZE,
443 .max_keysize = AES_MAX_KEY_SIZE, 443 .max_keysize = AES_MAX_KEY_SIZE,
444 .ivsize = AES_BLOCK_SIZE, 444 .ivsize = 0,
445 .setkey = ablk_set_key, 445 .setkey = ablk_set_key,
446 .encrypt = ablk_encrypt, 446 .encrypt = ablk_encrypt,
447 .decrypt = ablk_decrypt, 447 .decrypt = ablk_decrypt,
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503c0591..e08d15184056 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
117 u32 irqstat; 117 u32 irqstat;
118 118
119 asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); 119 asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
120 dsb(sy);
120 return irqstat; 121 return irqstat;
121} 122}
122 123
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 0375c8caa061..9408a994cc91 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
35 dma_addr_t dev_addr, unsigned long offset, size_t size, 35 dma_addr_t dev_addr, unsigned long offset, size_t size,
36 enum dma_data_direction dir, struct dma_attrs *attrs) 36 enum dma_data_direction dir, struct dma_attrs *attrs)
37{ 37{
38 bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page); 38 unsigned long page_pfn = page_to_xen_pfn(page);
39 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
40 unsigned long compound_pages =
41 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
42 bool local = (page_pfn <= dev_pfn) &&
43 (dev_pfn - page_pfn < compound_pages);
44
39 /* 45 /*
40 * Dom0 is mapped 1:1, while the Linux page can be spanned accross 46 * Dom0 is mapped 1:1, while the Linux page can span across
41 * multiple Xen page, it's not possible to have a mix of local and 47 * multiple Xen pages, it's not possible for it to contain a
42 * foreign Xen page. So if the first xen_pfn == mfn the page is local 48 * mix of local and foreign Xen pages. So if the first xen_pfn
43 * otherwise it's a foreign page grant-mapped in dom0. If the page is 49 * == mfn the page is local otherwise it's a foreign page
44 * local we can safely call the native dma_ops function, otherwise we 50 * grant-mapped in dom0. If the page is local we can safely
45 * call the xen specific function. 51 * call the native dma_ops function, otherwise we call the xen
52 * specific function.
46 */ 53 */
47 if (local) 54 if (local)
48 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 55 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 2c5f160be65e..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
88obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 88obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
89 89
90obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 90obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
91AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
91ifeq ($(CONFIG_ARM_PSCI),y) 92ifeq ($(CONFIG_ARM_PSCI),y)
92obj-$(CONFIG_SMP) += psci_smp.o 93obj-$(CONFIG_SMP) += psci_smp.o
93endif 94endif
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 5fa69d7bae58..99361f11354a 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
161 u64 val; 161 u64 val;
162 162
163 val = kvm_arm_timer_get_reg(vcpu, reg->id); 163 val = kvm_arm_timer_get_reg(vcpu, reg->id);
164 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); 164 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
165} 165}
166 166
167static unsigned long num_core_regs(void) 167static unsigned long num_core_regs(void)
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 7f33b2056ae6..0f6600f05137 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
206 run->mmio.is_write = is_write; 206 run->mmio.is_write = is_write;
207 run->mmio.phys_addr = fault_ipa; 207 run->mmio.phys_addr = fault_ipa;
208 run->mmio.len = len; 208 run->mmio.len = len;
209 memcpy(run->mmio.data, data_buf, len); 209 if (is_write)
210 memcpy(run->mmio.data, data_buf, len);
210 211
211 if (!ret) { 212 if (!ret) {
212 /* We handled the access successfully in the kernel. */ 213 /* We handled the access successfully in the kernel. */
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 809827265fb3..bab814d2f37d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -18,6 +18,7 @@
18 18
19#include <asm/setup.h> 19#include <asm/setup.h>
20#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
21#include <asm/system_info.h>
21 22
22#include "common.h" 23#include "common.h"
23 24
@@ -77,12 +78,31 @@ static const char *const n900_boards_compat[] __initconst = {
77 NULL, 78 NULL,
78}; 79};
79 80
81/* Set system_rev from atags */
82static void __init rx51_set_system_rev(const struct tag *tags)
83{
84 const struct tag *tag;
85
86 if (tags->hdr.tag != ATAG_CORE)
87 return;
88
89 for_each_tag(tag, tags) {
90 if (tag->hdr.tag == ATAG_REVISION) {
91 system_rev = tag->u.revision.rev;
92 break;
93 }
94 }
95}
96
80/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags, 97/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
81 * save them while the data is still not overwritten 98 * save them while the data is still not overwritten
82 */ 99 */
83static void __init rx51_reserve(void) 100static void __init rx51_reserve(void)
84{ 101{
85 save_atags((const struct tag *)(PAGE_OFFSET + 0x100)); 102 const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);
103
104 save_atags(tags);
105 rx51_set_system_rev(tags);
86 omap_reserve(); 106 omap_reserve();
87} 107}
88 108
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7b76ce01c21d..8633c703546a 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
101 101
102static void set_onenand_cfg(void __iomem *onenand_base) 102static void set_onenand_cfg(void __iomem *onenand_base)
103{ 103{
104 u32 reg; 104 u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
105 105
106 reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
107 reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
108 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) | 106 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
109 ONENAND_SYS_CFG1_BL_16; 107 ONENAND_SYS_CFG1_BL_16;
110 if (onenand_flags & ONENAND_FLAG_SYNCREAD) 108 if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
123 reg |= ONENAND_SYS_CFG1_VHF; 121 reg |= ONENAND_SYS_CFG1_VHF;
124 else 122 else
125 reg &= ~ONENAND_SYS_CFG1_VHF; 123 reg &= ~ONENAND_SYS_CFG1_VHF;
124
126 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); 125 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
127} 126}
128 127
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
289 } 288 }
290 } 289 }
291 290
291 onenand_async.sync_write = true;
292 omap2_onenand_calc_async_timings(&t); 292 omap2_onenand_calc_async_timings(&t);
293 293
294 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async); 294 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 0437537751bc..f7ff3b9dad87 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -191,12 +191,22 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
191{ 191{
192 struct platform_device *pdev = to_platform_device(dev); 192 struct platform_device *pdev = to_platform_device(dev);
193 struct omap_device *od; 193 struct omap_device *od;
194 int err;
194 195
195 switch (event) { 196 switch (event) {
196 case BUS_NOTIFY_DEL_DEVICE: 197 case BUS_NOTIFY_DEL_DEVICE:
197 if (pdev->archdata.od) 198 if (pdev->archdata.od)
198 omap_device_delete(pdev->archdata.od); 199 omap_device_delete(pdev->archdata.od);
199 break; 200 break;
201 case BUS_NOTIFY_UNBOUND_DRIVER:
202 od = to_omap_device(pdev);
203 if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
204 dev_info(dev, "enabled after unload, idling\n");
205 err = omap_device_idle(pdev);
206 if (err)
207 dev_err(dev, "failed to idle\n");
208 }
209 break;
200 case BUS_NOTIFY_ADD_DEVICE: 210 case BUS_NOTIFY_ADD_DEVICE:
201 if (pdev->dev.of_node) 211 if (pdev->dev.of_node)
202 omap_device_build_from_dt(pdev); 212 omap_device_build_from_dt(pdev);
@@ -602,8 +612,10 @@ static int _od_runtime_resume(struct device *dev)
602 int ret; 612 int ret;
603 613
604 ret = omap_device_enable(pdev); 614 ret = omap_device_enable(pdev);
605 if (ret) 615 if (ret) {
616 dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
606 return ret; 617 return ret;
618 }
607 619
608 return pm_generic_runtime_resume(dev); 620 return pm_generic_runtime_resume(dev);
609} 621}
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 9cb11215ceba..b3a4ed5289ec 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -4,7 +4,6 @@
4extern void shmobile_init_delay(void); 4extern void shmobile_init_delay(void);
5extern void shmobile_boot_vector(void); 5extern void shmobile_boot_vector(void);
6extern unsigned long shmobile_boot_fn; 6extern unsigned long shmobile_boot_fn;
7extern unsigned long shmobile_boot_arg;
8extern unsigned long shmobile_boot_size; 7extern unsigned long shmobile_boot_size;
9extern void shmobile_smp_boot(void); 8extern void shmobile_smp_boot(void);
10extern void shmobile_smp_sleep(void); 9extern void shmobile_smp_sleep(void);
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index fa5248c52399..5e503d91ad70 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
38 38
39 b secondary_startup 39 b secondary_startup
40ENDPROC(shmobile_boot_scu) 40ENDPROC(shmobile_boot_scu)
41
42 .text
43 .align 2
44 .globl shmobile_scu_base
45shmobile_scu_base:
46 .space 4
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 330c1fc63197..32e0bf6e3ccb 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,7 +24,6 @@
24 .arm 24 .arm
25 .align 12 25 .align 12
26ENTRY(shmobile_boot_vector) 26ENTRY(shmobile_boot_vector)
27 ldr r0, 2f
28 ldr r1, 1f 27 ldr r1, 1f
29 bx r1 28 bx r1
30 29
@@ -34,9 +33,6 @@ ENDPROC(shmobile_boot_vector)
34 .globl shmobile_boot_fn 33 .globl shmobile_boot_fn
35shmobile_boot_fn: 34shmobile_boot_fn:
361: .space 4 351: .space 4
37 .globl shmobile_boot_arg
38shmobile_boot_arg:
392: .space 4
40 .globl shmobile_boot_size 36 .globl shmobile_boot_size
41shmobile_boot_size: 37shmobile_boot_size:
42 .long . - shmobile_boot_vector 38 .long . - shmobile_boot_vector
@@ -46,13 +42,15 @@ shmobile_boot_size:
46 */ 42 */
47 43
48ENTRY(shmobile_smp_boot) 44ENTRY(shmobile_smp_boot)
49 @ r0 = MPIDR_HWID_BITMASK
50 mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR 45 mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
51 and r0, r1, r0 @ r0 = cpu_logical_map() value 46 and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
47 @ r0 = cpu_logical_map() value
52 mov r1, #0 @ r1 = CPU index 48 mov r1, #0 @ r1 = CPU index
53 adr r5, 1f @ array of per-cpu mpidr values 49 adr r2, 1f
54 adr r6, 2f @ array of per-cpu functions 50 ldmia r2, {r5, r6, r7}
55 adr r7, 3f @ array of per-cpu arguments 51 add r5, r5, r2 @ array of per-cpu mpidr values
52 add r6, r6, r2 @ array of per-cpu functions
53 add r7, r7, r2 @ array of per-cpu arguments
56 54
57shmobile_smp_boot_find_mpidr: 55shmobile_smp_boot_find_mpidr:
58 ldr r8, [r5, r1, lsl #2] 56 ldr r8, [r5, r1, lsl #2]
@@ -80,12 +78,18 @@ ENTRY(shmobile_smp_sleep)
80 b shmobile_smp_boot 78 b shmobile_smp_boot
81ENDPROC(shmobile_smp_sleep) 79ENDPROC(shmobile_smp_sleep)
82 80
81 .align 2
821: .long shmobile_smp_mpidr - .
83 .long shmobile_smp_fn - 1b
84 .long shmobile_smp_arg - 1b
85
86 .bss
83 .globl shmobile_smp_mpidr 87 .globl shmobile_smp_mpidr
84shmobile_smp_mpidr: 88shmobile_smp_mpidr:
851: .space NR_CPUS * 4 89 .space NR_CPUS * 4
86 .globl shmobile_smp_fn 90 .globl shmobile_smp_fn
87shmobile_smp_fn: 91shmobile_smp_fn:
882: .space NR_CPUS * 4 92 .space NR_CPUS * 4
89 .globl shmobile_smp_arg 93 .globl shmobile_smp_arg
90shmobile_smp_arg: 94shmobile_smp_arg:
913: .space NR_CPUS * 4 95 .space NR_CPUS * 4
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index 911884f7e28b..aba75c89f9c1 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -123,7 +123,6 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
123{ 123{
124 /* install boot code shared by all CPUs */ 124 /* install boot code shared by all CPUs */
125 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 125 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
126 shmobile_boot_arg = MPIDR_HWID_BITMASK;
127 126
128 /* perform per-cpu setup */ 127 /* perform per-cpu setup */
129 apmu_parse_cfg(apmu_init_cpu, apmu_config, num); 128 apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index 64663110ab6c..081a097c9219 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -17,6 +17,9 @@
17#include <asm/smp_scu.h> 17#include <asm/smp_scu.h>
18#include "common.h" 18#include "common.h"
19 19
20
21void __iomem *shmobile_scu_base;
22
20static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb, 23static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
21 unsigned long action, void *hcpu) 24 unsigned long action, void *hcpu)
22{ 25{
@@ -41,7 +44,6 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
41{ 44{
42 /* install boot code shared by all CPUs */ 45 /* install boot code shared by all CPUs */
43 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 46 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
44 shmobile_boot_arg = MPIDR_HWID_BITMASK;
45 47
46 /* enable SCU and cache coherency on booting CPU */ 48 /* enable SCU and cache coherency on booting CPU */
47 scu_enable(shmobile_scu_base); 49 scu_enable(shmobile_scu_base);
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b854fe2095ad..0b024a9dbd43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -92,8 +92,6 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
92{ 92{
93 /* Map the reset vector (in headsmp-scu.S, headsmp.S) */ 93 /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
94 __raw_writel(__pa(shmobile_boot_vector), AVECR); 94 __raw_writel(__pa(shmobile_boot_vector), AVECR);
95 shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
96 shmobile_boot_arg = (unsigned long)shmobile_scu_base;
97 95
98 /* setup r8a7779 specific SCU bits */ 96 /* setup r8a7779 specific SCU bits */
99 shmobile_scu_base = IOMEM(R8A7779_SCU_BASE); 97 shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4b4058db0781..66353caa35b9 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,7 +173,7 @@ unsigned long arch_mmap_rnd(void)
173{ 173{
174 unsigned long rnd; 174 unsigned long rnd;
175 175
176 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 176 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
177 177
178 return rnd << PAGE_SHIFT; 178 return rnd << PAGE_SHIFT;
179} 179}
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index cf30daff8932..d19b1ad29b07 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
49 WARN_ON_ONCE(1); 49 WARN_ON_ONCE(1);
50 } 50 }
51 51
52 if (!numpages)
53 return 0;
54
52 if (start < MODULES_VADDR || start >= MODULES_END) 55 if (start < MODULES_VADDR || start >= MODULES_END)
53 return -EINVAL; 56 return -EINVAL;
54 57
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 307237cfe728..b5e3f6d42b88 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -88,7 +88,7 @@ Image: vmlinux
88Image.%: vmlinux 88Image.%: vmlinux
89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
90 90
91zinstall install: vmlinux 91zinstall install:
92 $(Q)$(MAKE) $(build)=$(boot) $@ 92 $(Q)$(MAKE) $(build)=$(boot) $@
93 93
94%.dtb: scripts 94%.dtb: scripts
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abcbba2f01ba..305c552b5ec1 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -34,10 +34,10 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
34$(obj)/Image.lzo: $(obj)/Image FORCE 34$(obj)/Image.lzo: $(obj)/Image FORCE
35 $(call if_changed,lzo) 35 $(call if_changed,lzo)
36 36
37install: $(obj)/Image 37install:
38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
39 $(obj)/Image System.map "$(INSTALL_PATH)" 39 $(obj)/Image System.map "$(INSTALL_PATH)"
40 40
41zinstall: $(obj)/Image.gz 41zinstall:
42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
43 $(obj)/Image.gz System.map "$(INSTALL_PATH)" 43 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 12ed78aa6f0c..d91e1f022573 100644
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -20,6 +20,20 @@
20# $4 - default install path (blank if root directory) 20# $4 - default install path (blank if root directory)
21# 21#
22 22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
23# User may have a custom install script 37# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 05d9e16c0dfd..7a3d22a46faf 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
294 .cra_blkcipher = { 294 .cra_blkcipher = {
295 .min_keysize = AES_MIN_KEY_SIZE, 295 .min_keysize = AES_MIN_KEY_SIZE,
296 .max_keysize = AES_MAX_KEY_SIZE, 296 .max_keysize = AES_MAX_KEY_SIZE,
297 .ivsize = AES_BLOCK_SIZE, 297 .ivsize = 0,
298 .setkey = aes_setkey, 298 .setkey = aes_setkey,
299 .encrypt = ecb_encrypt, 299 .encrypt = ecb_encrypt,
300 .decrypt = ecb_decrypt, 300 .decrypt = ecb_decrypt,
@@ -371,7 +371,7 @@ static struct crypto_alg aes_algs[] = { {
371 .cra_ablkcipher = { 371 .cra_ablkcipher = {
372 .min_keysize = AES_MIN_KEY_SIZE, 372 .min_keysize = AES_MIN_KEY_SIZE,
373 .max_keysize = AES_MAX_KEY_SIZE, 373 .max_keysize = AES_MAX_KEY_SIZE,
374 .ivsize = AES_BLOCK_SIZE, 374 .ivsize = 0,
375 .setkey = ablk_set_key, 375 .setkey = ablk_set_key,
376 .encrypt = ablk_encrypt, 376 .encrypt = ablk_encrypt,
377 .decrypt = ablk_decrypt, 377 .decrypt = ablk_decrypt,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index bef6e9243c63..d201d4b396d1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ 107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) 108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
109 109
110#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
111
112/* VTCR_EL2 Registers bits */ 110/* VTCR_EL2 Registers bits */
113#define VTCR_EL2_RES1 (1 << 31) 111#define VTCR_EL2_RES1 (1 << 31)
114#define VTCR_EL2_PS_MASK (7 << 16) 112#define VTCR_EL2_PS_MASK (7 << 16)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bf464de33f52..f50608674580 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,13 +34,13 @@
34/* 34/*
35 * VMALLOC and SPARSEMEM_VMEMMAP ranges. 35 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
36 * 36 *
37 * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array 37 * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
38 * (rounded up to PUD_SIZE). 38 * (rounded up to PUD_SIZE).
39 * VMALLOC_START: beginning of the kernel VA space 39 * VMALLOC_START: beginning of the kernel VA space
40 * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space, 40 * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
41 * fixed mappings and modules 41 * fixed mappings and modules
42 */ 42 */
43#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) 43#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
44 44
45#ifndef CONFIG_KASAN 45#ifndef CONFIG_KASAN
46#define VMALLOC_START (VA_START) 46#define VMALLOC_START (VA_START)
@@ -51,7 +51,8 @@
51 51
52#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) 52#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
53 53
54#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) 54#define VMEMMAP_START (VMALLOC_END + SZ_64K)
55#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
55 56
56#define FIRST_USER_ADDRESS 0UL 57#define FIRST_USER_ADDRESS 0UL
57 58
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3aeec3e6..c536c9e307b9 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,11 +226,28 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
226 return retval; 226 return retval;
227} 227}
228 228
229static void send_user_sigtrap(int si_code)
230{
231 struct pt_regs *regs = current_pt_regs();
232 siginfo_t info = {
233 .si_signo = SIGTRAP,
234 .si_errno = 0,
235 .si_code = si_code,
236 .si_addr = (void __user *)instruction_pointer(regs),
237 };
238
239 if (WARN_ON(!user_mode(regs)))
240 return;
241
242 if (interrupts_enabled(regs))
243 local_irq_enable();
244
245 force_sig_info(SIGTRAP, &info, current);
246}
247
229static int single_step_handler(unsigned long addr, unsigned int esr, 248static int single_step_handler(unsigned long addr, unsigned int esr,
230 struct pt_regs *regs) 249 struct pt_regs *regs)
231{ 250{
232 siginfo_t info;
233
234 /* 251 /*
235 * If we are stepping a pending breakpoint, call the hw_breakpoint 252 * If we are stepping a pending breakpoint, call the hw_breakpoint
236 * handler first. 253 * handler first.
@@ -239,11 +256,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
239 return 0; 256 return 0;
240 257
241 if (user_mode(regs)) { 258 if (user_mode(regs)) {
242 info.si_signo = SIGTRAP; 259 send_user_sigtrap(TRAP_HWBKPT);
243 info.si_errno = 0;
244 info.si_code = TRAP_HWBKPT;
245 info.si_addr = (void __user *)instruction_pointer(regs);
246 force_sig_info(SIGTRAP, &info, current);
247 260
248 /* 261 /*
249 * ptrace will disable single step unless explicitly 262 * ptrace will disable single step unless explicitly
@@ -307,17 +320,8 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
307static int brk_handler(unsigned long addr, unsigned int esr, 320static int brk_handler(unsigned long addr, unsigned int esr,
308 struct pt_regs *regs) 321 struct pt_regs *regs)
309{ 322{
310 siginfo_t info;
311
312 if (user_mode(regs)) { 323 if (user_mode(regs)) {
313 info = (siginfo_t) { 324 send_user_sigtrap(TRAP_BRKPT);
314 .si_signo = SIGTRAP,
315 .si_errno = 0,
316 .si_code = TRAP_BRKPT,
317 .si_addr = (void __user *)instruction_pointer(regs),
318 };
319
320 force_sig_info(SIGTRAP, &info, current);
321 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { 325 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
322 pr_warning("Unexpected kernel BRK exception at EL1\n"); 326 pr_warning("Unexpected kernel BRK exception at EL1\n");
323 return -EFAULT; 327 return -EFAULT;
@@ -328,7 +332,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
328 332
329int aarch32_break_handler(struct pt_regs *regs) 333int aarch32_break_handler(struct pt_regs *regs)
330{ 334{
331 siginfo_t info;
332 u32 arm_instr; 335 u32 arm_instr;
333 u16 thumb_instr; 336 u16 thumb_instr;
334 bool bp = false; 337 bool bp = false;
@@ -359,14 +362,7 @@ int aarch32_break_handler(struct pt_regs *regs)
359 if (!bp) 362 if (!bp)
360 return -EFAULT; 363 return -EFAULT;
361 364
362 info = (siginfo_t) { 365 send_user_sigtrap(TRAP_BRKPT);
363 .si_signo = SIGTRAP,
364 .si_errno = 0,
365 .si_code = TRAP_BRKPT,
366 .si_addr = pc,
367 };
368
369 force_sig_info(SIGTRAP, &info, current);
370 return 0; 366 return 0;
371} 367}
372 368
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 999633bd7294..352f7abd91c9 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -89,6 +89,7 @@ __efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); 89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
90__efistub_memset = KALLSYMS_HIDE(__pi_memset); 90__efistub_memset = KALLSYMS_HIDE(__pi_memset);
91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); 91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
92__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
92__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); 93__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
93__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); 94__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
94__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); 95__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4fad9787ab46..d9751a4769e7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,14 +44,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
44 unsigned long irq_stack_ptr; 44 unsigned long irq_stack_ptr;
45 45
46 /* 46 /*
47 * Use raw_smp_processor_id() to avoid false-positives from 47 * Switching between stacks is valid when tracing current and in
48 * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping 48 * non-preemptible context.
49 * task stacks, we can be pre-empted in this case, so
50 * {raw_,}smp_processor_id() may give us the wrong value. Sleeping
51 * tasks can't ever be on an interrupt stack, so regardless of cpu,
52 * the checks will always fail.
53 */ 49 */
54 irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); 50 if (tsk == current && !preemptible())
51 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
52 else
53 irq_stack_ptr = 0;
55 54
56 low = frame->sp; 55 low = frame->sp;
57 /* irq stacks are not THREAD_SIZE aligned */ 56 /* irq stacks are not THREAD_SIZE aligned */
@@ -64,8 +63,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
64 return -EINVAL; 63 return -EINVAL;
65 64
66 frame->sp = fp + 0x10; 65 frame->sp = fp + 0x10;
67 frame->fp = *(unsigned long *)(fp); 66 frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
68 frame->pc = *(unsigned long *)(fp + 8); 67 frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
69 68
70#ifdef CONFIG_FUNCTION_GRAPH_TRACER 69#ifdef CONFIG_FUNCTION_GRAPH_TRACER
71 if (tsk && tsk->ret_stack && 70 if (tsk && tsk->ret_stack &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cbedd724f48e..c5392081b49b 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -146,9 +146,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) 146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
147{ 147{
148 struct stackframe frame; 148 struct stackframe frame;
149 unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); 149 unsigned long irq_stack_ptr;
150 int skip; 150 int skip;
151 151
152 /*
153 * Switching between stacks is valid when tracing current and in
154 * non-preemptible context.
155 */
156 if (tsk == current && !preemptible())
157 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
158 else
159 irq_stack_ptr = 0;
160
152 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 161 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
153 162
154 if (!tsk) 163 if (!tsk)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index fcb778899a38..9e54ad7c240a 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
194 u64 val; 194 u64 val;
195 195
196 val = kvm_arm_timer_get_reg(vcpu, reg->id); 196 val = kvm_arm_timer_get_reg(vcpu, reg->id);
197 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); 197 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
198} 198}
199 199
200/** 200/**
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dcd907b..d073b5a216f7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
64 mrs x4, tcr_el1 64 mrs x4, tcr_el1
65 ldr x5, =TCR_EL2_MASK 65 ldr x5, =TCR_EL2_MASK
66 and x4, x4, x5 66 and x4, x4, x5
67 ldr x5, =TCR_EL2_FLAGS 67 mov x5, #TCR_EL2_RES1
68 orr x4, x4, x5 68 orr x4, x4, x5
69 69
70#ifndef CONFIG_ARM64_VA_BITS_48 70#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,17 @@ __do_hyp_init:
85 ldr_l x5, idmap_t0sz 85 ldr_l x5, idmap_t0sz
86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH 86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
87#endif 87#endif
88 msr tcr_el2, x4
89
90 ldr x4, =VTCR_EL2_FLAGS
91 /* 88 /*
92 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in 89 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
93 * VTCR_EL2. 90 * TCR_EL2 and VTCR_EL2.
94 */ 91 */
95 mrs x5, ID_AA64MMFR0_EL1 92 mrs x5, ID_AA64MMFR0_EL1
96 bfi x4, x5, #16, #3 93 bfi x4, x5, #16, #3
94
95 msr tcr_el2, x4
96
97 ldr x4, =VTCR_EL2_FLAGS
98 bfi x4, x5, #16, #3
97 /* 99 /*
98 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in 100 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
99 * VTCR_EL2. 101 * VTCR_EL2.
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 9142e082f5f3..5dd2a26444ec 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -149,16 +149,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
149 149
150 switch (nr_pri_bits) { 150 switch (nr_pri_bits) {
151 case 7: 151 case 7:
152 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
153 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
154 case 6:
155 write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
156 default:
157 write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
158 }
159
160 switch (nr_pri_bits) {
161 case 7:
162 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); 152 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
163 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); 153 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
164 case 6: 154 case 6:
@@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
167 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); 157 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
168 } 158 }
169 159
160 switch (nr_pri_bits) {
161 case 7:
162 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
163 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
164 case 6:
165 write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
166 default:
167 write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
168 }
169
170 switch (max_lr_idx) { 170 switch (max_lr_idx) {
171 case 15: 171 case 15:
172 write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); 172 write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 2ca665711bf2..eae38da6e0bb 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -168,4 +168,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
168.Lhit_limit: 168.Lhit_limit:
169 mov len, limit 169 mov len, limit
170 ret 170 ret
171ENDPROC(strnlen) 171ENDPIPROC(strnlen)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 331c4ca6205c..a6e757cbab77 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
933 ret = register_iommu_dma_ops_notifier(&platform_bus_type); 933 ret = register_iommu_dma_ops_notifier(&platform_bus_type);
934 if (!ret) 934 if (!ret)
935 ret = register_iommu_dma_ops_notifier(&amba_bustype); 935 ret = register_iommu_dma_ops_notifier(&amba_bustype);
936
937 /* handle devices queued before this arch_initcall */
938 if (!ret)
939 __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
936 return ret; 940 return ret;
937} 941}
938arch_initcall(__iommu_dma_init); 942arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 92ddac1e8ca2..abe2a9542b3a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,6 +371,13 @@ static int __kprobes do_translation_fault(unsigned long addr,
371 return 0; 371 return 0;
372} 372}
373 373
374static int do_alignment_fault(unsigned long addr, unsigned int esr,
375 struct pt_regs *regs)
376{
377 do_bad_area(addr, esr, regs);
378 return 0;
379}
380
374/* 381/*
375 * This abort handler always returns "fault". 382 * This abort handler always returns "fault".
376 */ 383 */
@@ -418,7 +425,7 @@ static struct fault_info {
418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 425 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 426 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
420 { do_bad, SIGBUS, 0, "unknown 32" }, 427 { do_bad, SIGBUS, 0, "unknown 32" },
421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, 428 { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
422 { do_bad, SIGBUS, 0, "unknown 34" }, 429 { do_bad, SIGBUS, 0, "unknown 34" },
423 { do_bad, SIGBUS, 0, "unknown 35" }, 430 { do_bad, SIGBUS, 0, "unknown 35" },
424 { do_bad, SIGBUS, 0, "unknown 36" }, 431 { do_bad, SIGBUS, 0, "unknown 36" },
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3b061e67bfe..7802f216a67a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -319,8 +319,8 @@ void __init mem_init(void)
319#endif 319#endif
320 MLG(VMALLOC_START, VMALLOC_END), 320 MLG(VMALLOC_START, VMALLOC_END),
321#ifdef CONFIG_SPARSEMEM_VMEMMAP 321#ifdef CONFIG_SPARSEMEM_VMEMMAP
322 MLG((unsigned long)vmemmap, 322 MLG(VMEMMAP_START,
323 (unsigned long)vmemmap + VMEMMAP_SIZE), 323 VMEMMAP_START + VMEMMAP_SIZE),
324 MLM((unsigned long)virt_to_page(PAGE_OFFSET), 324 MLM((unsigned long)virt_to_page(PAGE_OFFSET),
325 (unsigned long)virt_to_page(high_memory)), 325 (unsigned long)virt_to_page(high_memory)),
326#endif 326#endif
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 4c893b5189dd..232f787a088a 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -53,10 +53,10 @@ unsigned long arch_mmap_rnd(void)
53 53
54#ifdef CONFIG_COMPAT 54#ifdef CONFIG_COMPAT
55 if (test_thread_flag(TIF_32BIT)) 55 if (test_thread_flag(TIF_32BIT))
56 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); 56 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
57 else 57 else
58#endif 58#endif
59 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 59 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
60 return rnd << PAGE_SHIFT; 60 return rnd << PAGE_SHIFT;
61} 61}
62 62
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index fc96e814188e..d1fc4796025e 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m
108CONFIG_NFT_QUEUE=m 108CONFIG_NFT_QUEUE=m
109CONFIG_NFT_REJECT=m 109CONFIG_NFT_REJECT=m
110CONFIG_NFT_COMPAT=m 110CONFIG_NFT_COMPAT=m
111CONFIG_NFT_DUP_NETDEV=m
112CONFIG_NFT_FWD_NETDEV=m
111CONFIG_NETFILTER_XT_SET=m 113CONFIG_NETFILTER_XT_SET=m
112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 114CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 115CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_L2TP=m
266CONFIG_BRIDGE=m 268CONFIG_BRIDGE=m
267CONFIG_ATALK=m 269CONFIG_ATALK=m
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@ CONFIG_ARIADNE=y
366# CONFIG_NET_VENDOR_INTEL is not set 374# CONFIG_NET_VENDOR_INTEL is not set
367# CONFIG_NET_VENDOR_MARVELL is not set 375# CONFIG_NET_VENDOR_MARVELL is not set
368# CONFIG_NET_VENDOR_MICREL is not set 376# CONFIG_NET_VENDOR_MICREL is not set
377# CONFIG_NET_VENDOR_NETRONOME is not set
369CONFIG_HYDRA=y 378CONFIG_HYDRA=y
370CONFIG_APNE=y 379CONFIG_APNE=y
371CONFIG_ZORRO8390=y 380CONFIG_ZORRO8390=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 05c904f08d9d..9bfe8be3658c 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@ CONFIG_VETH=m
344# CONFIG_NET_VENDOR_MARVELL is not set 352# CONFIG_NET_VENDOR_MARVELL is not set
345# CONFIG_NET_VENDOR_MICREL is not set 353# CONFIG_NET_VENDOR_MICREL is not set
346# CONFIG_NET_VENDOR_NATSEMI is not set 354# CONFIG_NET_VENDOR_NATSEMI is not set
355# CONFIG_NET_VENDOR_NETRONOME is not set
347# CONFIG_NET_VENDOR_QUALCOMM is not set 356# CONFIG_NET_VENDOR_QUALCOMM is not set
348# CONFIG_NET_VENDOR_RENESAS is not set 357# CONFIG_NET_VENDOR_RENESAS is not set
349# CONFIG_NET_VENDOR_ROCKER is not set 358# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index d572b731c510..ebdcfae55580 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y
353# CONFIG_NET_VENDOR_INTEL is not set 361# CONFIG_NET_VENDOR_INTEL is not set
354# CONFIG_NET_VENDOR_MARVELL is not set 362# CONFIG_NET_VENDOR_MARVELL is not set
355# CONFIG_NET_VENDOR_MICREL is not set 363# CONFIG_NET_VENDOR_MICREL is not set
364# CONFIG_NET_VENDOR_NETRONOME is not set
356CONFIG_NE2000=y 365CONFIG_NE2000=y
357# CONFIG_NET_VENDOR_QUALCOMM is not set 366# CONFIG_NET_VENDOR_QUALCOMM is not set
358# CONFIG_NET_VENDOR_RENESAS is not set 367# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 11a30c65ad44..8acc65e54995 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6630a5154b9d..0c6a3d52b26e 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@ CONFIG_HPLANCE=y
345# CONFIG_NET_VENDOR_MARVELL is not set 353# CONFIG_NET_VENDOR_MARVELL is not set
346# CONFIG_NET_VENDOR_MICREL is not set 354# CONFIG_NET_VENDOR_MICREL is not set
347# CONFIG_NET_VENDOR_NATSEMI is not set 355# CONFIG_NET_VENDOR_NATSEMI is not set
356# CONFIG_NET_VENDOR_NETRONOME is not set
348# CONFIG_NET_VENDOR_QUALCOMM is not set 357# CONFIG_NET_VENDOR_QUALCOMM is not set
349# CONFIG_NET_VENDOR_RENESAS is not set 358# CONFIG_NET_VENDOR_RENESAS is not set
350# CONFIG_NET_VENDOR_ROCKER is not set 359# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1d90b71d0903..12a8a6cb32f4 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m
105CONFIG_NFT_QUEUE=m 105CONFIG_NFT_QUEUE=m
106CONFIG_NFT_REJECT=m 106CONFIG_NFT_REJECT=m
107CONFIG_NFT_COMPAT=m 107CONFIG_NFT_COMPAT=m
108CONFIG_NFT_DUP_NETDEV=m
109CONFIG_NFT_FWD_NETDEV=m
108CONFIG_NETFILTER_XT_SET=m 110CONFIG_NETFILTER_XT_SET=m
109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 111CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 112CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m
266CONFIG_IPDDP=m 268CONFIG_IPDDP=m
267CONFIG_IPDDP_ENCAP=y 269CONFIG_IPDDP_ENCAP=y
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@ CONFIG_MAC89x0=y
362# CONFIG_NET_VENDOR_MARVELL is not set 370# CONFIG_NET_VENDOR_MARVELL is not set
363# CONFIG_NET_VENDOR_MICREL is not set 371# CONFIG_NET_VENDOR_MICREL is not set
364CONFIG_MACSONIC=y 372CONFIG_MACSONIC=y
373# CONFIG_NET_VENDOR_NETRONOME is not set
365CONFIG_MAC8390=y 374CONFIG_MAC8390=y
366# CONFIG_NET_VENDOR_QUALCOMM is not set 375# CONFIG_NET_VENDOR_QUALCOMM is not set
367# CONFIG_NET_VENDOR_RENESAS is not set 376# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1fd21c1ca87f..64ff2dcb34c8 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m
115CONFIG_NFT_QUEUE=m 115CONFIG_NFT_QUEUE=m
116CONFIG_NFT_REJECT=m 116CONFIG_NFT_REJECT=m
117CONFIG_NFT_COMPAT=m 117CONFIG_NFT_COMPAT=m
118CONFIG_NFT_DUP_NETDEV=m
119CONFIG_NFT_FWD_NETDEV=m
118CONFIG_NETFILTER_XT_SET=m 120CONFIG_NETFILTER_XT_SET=m
119CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 121CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
120CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 122CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m
276CONFIG_IPDDP=m 278CONFIG_IPDDP=m
277CONFIG_IPDDP_ENCAP=y 279CONFIG_IPDDP_ENCAP=y
278CONFIG_6LOWPAN=m 280CONFIG_6LOWPAN=m
281CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
282CONFIG_6LOWPAN_GHC_UDP=m
283CONFIG_6LOWPAN_GHC_ICMPV6=m
284CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
285CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
286CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
279CONFIG_DNS_RESOLVER=y 287CONFIG_DNS_RESOLVER=y
280CONFIG_BATMAN_ADV=m 288CONFIG_BATMAN_ADV=m
281CONFIG_BATMAN_ADV_DAT=y 289CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y
404# CONFIG_NET_VENDOR_MARVELL is not set 412# CONFIG_NET_VENDOR_MARVELL is not set
405# CONFIG_NET_VENDOR_MICREL is not set 413# CONFIG_NET_VENDOR_MICREL is not set
406CONFIG_MACSONIC=y 414CONFIG_MACSONIC=y
415# CONFIG_NET_VENDOR_NETRONOME is not set
407CONFIG_HYDRA=y 416CONFIG_HYDRA=y
408CONFIG_MAC8390=y 417CONFIG_MAC8390=y
409CONFIG_NE2000=y 418CONFIG_NE2000=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 74e10f79d7b1..07fc6abcfe0c 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m
103CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
104CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
105CONFIG_NFT_COMPAT=m 105CONFIG_NFT_COMPAT=m
106CONFIG_NFT_DUP_NETDEV=m
107CONFIG_NFT_FWD_NETDEV=m
106CONFIG_NETFILTER_XT_SET=m 108CONFIG_NETFILTER_XT_SET=m
107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@ CONFIG_L2TP=m
261CONFIG_BRIDGE=m 263CONFIG_BRIDGE=m
262CONFIG_ATALK=m 264CONFIG_ATALK=m
263CONFIG_6LOWPAN=m 265CONFIG_6LOWPAN=m
266CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
267CONFIG_6LOWPAN_GHC_UDP=m
268CONFIG_6LOWPAN_GHC_ICMPV6=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
264CONFIG_DNS_RESOLVER=y 272CONFIG_DNS_RESOLVER=y
265CONFIG_BATMAN_ADV=m 273CONFIG_BATMAN_ADV=m
266CONFIG_BATMAN_ADV_DAT=y 274CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7034e716f166..69903ded88f7 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f7deb5f702a6..bd8401686dde 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@ CONFIG_VETH=m
352# CONFIG_NET_VENDOR_INTEL is not set 360# CONFIG_NET_VENDOR_INTEL is not set
353# CONFIG_NET_VENDOR_MARVELL is not set 361# CONFIG_NET_VENDOR_MARVELL is not set
354# CONFIG_NET_VENDOR_MICREL is not set 362# CONFIG_NET_VENDOR_MICREL is not set
363# CONFIG_NET_VENDOR_NETRONOME is not set
355CONFIG_NE2000=y 364CONFIG_NE2000=y
356# CONFIG_NET_VENDOR_QUALCOMM is not set 365# CONFIG_NET_VENDOR_QUALCOMM is not set
357# CONFIG_NET_VENDOR_RENESAS is not set 366# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 0ce79eb0d805..5f9fb3ab9636 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y
340# CONFIG_NET_VENDOR_MARVELL is not set 348# CONFIG_NET_VENDOR_MARVELL is not set
341# CONFIG_NET_VENDOR_MICREL is not set 349# CONFIG_NET_VENDOR_MICREL is not set
342# CONFIG_NET_VENDOR_NATSEMI is not set 350# CONFIG_NET_VENDOR_NATSEMI is not set
351# CONFIG_NET_VENDOR_NETRONOME is not set
343# CONFIG_NET_VENDOR_QUALCOMM is not set 352# CONFIG_NET_VENDOR_QUALCOMM is not set
344# CONFIG_NET_VENDOR_RENESAS is not set 353# CONFIG_NET_VENDOR_RENESAS is not set
345# CONFIG_NET_VENDOR_ROCKER is not set 354# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 4cb787e4991f..5d1c674530e2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y
341# CONFIG_NET_VENDOR_MARVELL is not set 349# CONFIG_NET_VENDOR_MARVELL is not set
342# CONFIG_NET_VENDOR_MICREL is not set 350# CONFIG_NET_VENDOR_MICREL is not set
343# CONFIG_NET_VENDOR_NATSEMI is not set 351# CONFIG_NET_VENDOR_NATSEMI is not set
352# CONFIG_NET_VENDOR_NETRONOME is not set
344# CONFIG_NET_VENDOR_QUALCOMM is not set 353# CONFIG_NET_VENDOR_QUALCOMM is not set
345# CONFIG_NET_VENDOR_RENESAS is not set 354# CONFIG_NET_VENDOR_RENESAS is not set
346# CONFIG_NET_VENDOR_ROCKER is not set 355# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f9d96bf86910..bafaff6dcd7b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 376 7#define NR_syscalls 377
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 36cf129de663..0ca729665f29 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -381,5 +381,6 @@
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375 383#define __NR_mlock2 375
384#define __NR_copy_file_range 376
384 385
385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 386#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 282cd903f4c4..8bb94261ff97 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -396,3 +396,4 @@ ENTRY(sys_call_table)
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */ 398 .long sys_mlock2 /* 375 */
399 .long sys_copy_file_range
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 8c6d76c9b2d6..d9907e57e9b9 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
270} 270}
271EXPORT_SYMBOL(jz_gpio_port_get_value); 271EXPORT_SYMBOL(jz_gpio_port_get_value);
272 272
273#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f) 273#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
274 274
275static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq) 275static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
276{ 276{
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 5ce3b746cedc..b4ac6374a38f 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -125,7 +125,7 @@ LEAF(_restore_fp_context)
125 END(_restore_fp_context) 125 END(_restore_fp_context)
126 .set reorder 126 .set reorder
127 127
128 .type fault@function 128 .type fault, @function
129 .ent fault 129 .ent fault
130fault: li v0, -EFAULT 130fault: li v0, -EFAULT
131 jr ra 131 jr ra
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index f09546ee2cdc..17732f876eff 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper)
358 358
359 .set reorder 359 .set reorder
360 360
361 .type fault@function 361 .type fault, @function
362 .ent fault 362 .ent fault
363fault: li v0, -EFAULT # failure 363fault: li v0, -EFAULT # failure
364 jr ra 364 jr ra
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ae790c575d4f..bf14da9f3e33 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
690asmlinkage void do_ov(struct pt_regs *regs) 690asmlinkage void do_ov(struct pt_regs *regs)
691{ 691{
692 enum ctx_state prev_state; 692 enum ctx_state prev_state;
693 siginfo_t info; 693 siginfo_t info = {
694 .si_signo = SIGFPE,
695 .si_code = FPE_INTOVF,
696 .si_addr = (void __user *)regs->cp0_epc,
697 };
694 698
695 prev_state = exception_enter(); 699 prev_state = exception_enter();
696 die_if_kernel("Integer overflow", regs); 700 die_if_kernel("Integer overflow", regs);
697 701
698 info.si_code = FPE_INTOVF;
699 info.si_signo = SIGFPE;
700 info.si_errno = 0;
701 info.si_addr = (void __user *) regs->cp0_epc;
702 force_sig_info(SIGFPE, &info, current); 702 force_sig_info(SIGFPE, &info, current);
703 exception_exit(prev_state); 703 exception_exit(prev_state);
704} 704}
@@ -874,7 +874,7 @@ out:
874void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 874void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
875 const char *str) 875 const char *str)
876{ 876{
877 siginfo_t info; 877 siginfo_t info = { 0 };
878 char b[40]; 878 char b[40];
879 879
880#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 880#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
903 else 903 else
904 info.si_code = FPE_INTOVF; 904 info.si_code = FPE_INTOVF;
905 info.si_signo = SIGFPE; 905 info.si_signo = SIGFPE;
906 info.si_errno = 0;
907 info.si_addr = (void __user *) regs->cp0_epc; 906 info.si_addr = (void __user *) regs->cp0_epc;
908 force_sig_info(SIGFPE, &info, current); 907 force_sig_info(SIGFPE, &info, current);
909 break; 908 break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8bc3977576e6..3110447ab1e9 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
702 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { 702 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
703 void __user *uaddr = (void __user *)(long)reg->addr; 703 void __user *uaddr = (void __user *)(long)reg->addr;
704 704
705 return copy_to_user(uaddr, vs, 16); 705 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
706 } else { 706 } else {
707 return -EINVAL; 707 return -EINVAL;
708 } 708 }
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
732 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { 732 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
733 void __user *uaddr = (void __user *)(long)reg->addr; 733 void __user *uaddr = (void __user *)(long)reg->addr;
734 734
735 return copy_from_user(vs, uaddr, 16); 735 return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
736 } else { 736 } else {
737 return -EINVAL; 737 return -EINVAL;
738 } 738 }
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 5c81fdd032c3..353037699512 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void)
146{ 146{
147 unsigned long rnd; 147 unsigned long rnd;
148 148
149 rnd = (unsigned long)get_random_int(); 149 rnd = get_random_long();
150 rnd <<= PAGE_SHIFT; 150 rnd <<= PAGE_SHIFT;
151 if (TASK_IS_32BIT_ADDR) 151 if (TASK_IS_32BIT_ADDR)
152 rnd &= 0xfffffful; 152 rnd &= 0xfffffful;
@@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
174 174
175static inline unsigned long brk_rnd(void) 175static inline unsigned long brk_rnd(void)
176{ 176{
177 unsigned long rnd = get_random_int(); 177 unsigned long rnd = get_random_long();
178 178
179 rnd = rnd << PAGE_SHIFT; 179 rnd = rnd << PAGE_SHIFT;
180 /* 8MB for 32bit, 256MB for 64bit */ 180 /* 8MB for 32bit, 256MB for 64bit */
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 249647578e58..91dec32c77b7 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
164 164
165 sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; 165 sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
166 sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; 166 sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
167 c->scache.sets = 64 << sets; 167 if (sets)
168 c->scache.sets = 64 << sets;
168 169
169 line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; 170 line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
170 line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; 171 line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
171 c->scache.linesz = 2 << line_sz; 172 if (line_sz)
173 c->scache.linesz = 2 << line_sz;
172 174
173 assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; 175 assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
174 assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; 176 assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
176 c->scache.waysize = c->scache.sets * c->scache.linesz; 178 c->scache.waysize = c->scache.sets * c->scache.linesz;
177 c->scache.waybit = __ffs(c->scache.waysize); 179 c->scache.waybit = __ffs(c->scache.waysize);
178 180
179 c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; 181 if (c->scache.linesz) {
182 c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
183 return 1;
184 }
180 185
181 return 1; 186 return 0;
182} 187}
183 188
184static inline int __init mips_sc_probe(void) 189static inline int __init mips_sc_probe(void)
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h
index f84ff12574b7..6d8276cd25ca 100644
--- a/arch/parisc/include/asm/floppy.h
+++ b/arch/parisc/include/asm/floppy.h
@@ -33,7 +33,7 @@
33 * floppy accesses go through the track buffer. 33 * floppy accesses go through the track buffer.
34 */ 34 */
35#define _CROSS_64KB(a,s,vdma) \ 35#define _CROSS_64KB(a,s,vdma) \
36(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) 36(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
37 37
38#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) 38#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
39 39
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 35bdccbb2036..b75039f92116 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -361,8 +361,9 @@
361#define __NR_membarrier (__NR_Linux + 343) 361#define __NR_membarrier (__NR_Linux + 343)
362#define __NR_userfaultfd (__NR_Linux + 344) 362#define __NR_userfaultfd (__NR_Linux + 344)
363#define __NR_mlock2 (__NR_Linux + 345) 363#define __NR_mlock2 (__NR_Linux + 345)
364#define __NR_copy_file_range (__NR_Linux + 346)
364 365
365#define __NR_Linux_syscalls (__NR_mlock2 + 1) 366#define __NR_Linux_syscalls (__NR_copy_file_range + 1)
366 367
367 368
368#define __IGNORE_select /* newselect */ 369#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 9585c81f755f..ce0b2b4075c7 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
269 269
270long do_syscall_trace_enter(struct pt_regs *regs) 270long do_syscall_trace_enter(struct pt_regs *regs)
271{ 271{
272 long ret = 0;
273
274 /* Do the secure computing check first. */ 272 /* Do the secure computing check first. */
275 secure_computing_strict(regs->gr[20]); 273 secure_computing_strict(regs->gr[20]);
276 274
277 if (test_thread_flag(TIF_SYSCALL_TRACE) && 275 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
278 tracehook_report_syscall_entry(regs)) 276 tracehook_report_syscall_entry(regs)) {
279 ret = -1L; 277 /*
278 * Tracing decided this syscall should not happen or the
279 * debugger stored an invalid system call number. Skip
280 * the system call and the system call restart handling.
281 */
282 regs->gr[20] = -1UL;
283 goto out;
284 }
280 285
281#ifdef CONFIG_64BIT 286#ifdef CONFIG_64BIT
282 if (!is_compat_task()) 287 if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
290 regs->gr[24] & 0xffffffff, 295 regs->gr[24] & 0xffffffff,
291 regs->gr[23] & 0xffffffff); 296 regs->gr[23] & 0xffffffff);
292 297
293 return ret ? : regs->gr[20]; 298out:
299 return regs->gr[20];
294} 300}
295 301
296void do_syscall_trace_exit(struct pt_regs *regs) 302void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 3fbd7252a4b2..fbafa0d0e2bf 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -343,7 +343,7 @@ tracesys_next:
343#endif 343#endif
344 344
345 comiclr,>>= __NR_Linux_syscalls, %r20, %r0 345 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
346 b,n .Lsyscall_nosys 346 b,n .Ltracesys_nosys
347 347
348 LDREGX %r20(%r19), %r19 348 LDREGX %r20(%r19), %r19
349 349
@@ -359,6 +359,9 @@ tracesys_next:
359 be 0(%sr7,%r19) 359 be 0(%sr7,%r19)
360 ldo R%tracesys_exit(%r2),%r2 360 ldo R%tracesys_exit(%r2),%r2
361 361
362.Ltracesys_nosys:
363 ldo -ENOSYS(%r0),%r28 /* set errno */
364
362 /* Do *not* call this function on the gateway page, because it 365 /* Do *not* call this function on the gateway page, because it
363 makes a direct call to syscall_trace. */ 366 makes a direct call to syscall_trace. */
364 367
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index d4ffcfbc9885..585d50fc75c0 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -441,6 +441,7 @@
441 ENTRY_SAME(membarrier) 441 ENTRY_SAME(membarrier)
442 ENTRY_SAME(userfaultfd) 442 ENTRY_SAME(userfaultfd)
443 ENTRY_SAME(mlock2) /* 345 */ 443 ENTRY_SAME(mlock2) /* 345 */
444 ENTRY_SAME(copy_file_range)
444 445
445 446
446.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) 447.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd04bb7..9faa18c4f3f7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -557,7 +557,7 @@ choice
557 557
558config PPC_4K_PAGES 558config PPC_4K_PAGES
559 bool "4k page size" 559 bool "4k page size"
560 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 560 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
561 561
562config PPC_16K_PAGES 562config PPC_16K_PAGES
563 bool "16k page size" 563 bool "16k page size"
@@ -566,7 +566,7 @@ config PPC_16K_PAGES
566config PPC_64K_PAGES 566config PPC_64K_PAGES
567 bool "64k page size" 567 bool "64k page size"
568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64) 568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
569 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 569 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
570 570
571config PPC_256K_PAGES 571config PPC_256K_PAGES
572 bool "256k page size" 572 bool "256k page size"
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8d1c41d28318..ac07a30a7934 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -281,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
282 pmd_t *pmdp); 282 pmd_t *pmdp);
283 283
284#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
285extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
286 unsigned long address, pmd_t *pmdp);
287
284#define pmd_move_must_withdraw pmd_move_must_withdraw 288#define pmd_move_must_withdraw pmd_move_must_withdraw
285struct spinlock; 289struct spinlock;
286static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 290static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f3d452..867c39b45df6 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@ struct pci_dn;
81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ 82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */ 83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
84#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
84 85
85struct eeh_pe { 86struct eeh_pe {
86 int type; /* PE type: PHB/Bus/Device */ 87 int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 8e86b48d0369..32e36b16773f 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
57extern void hcall_tracepoint_regfunc(void); 57extern void hcall_tracepoint_regfunc(void);
58extern void hcall_tracepoint_unregfunc(void); 58extern void hcall_tracepoint_unregfunc(void);
59 59
60TRACE_EVENT_FN(hcall_entry, 60TRACE_EVENT_FN_COND(hcall_entry,
61 61
62 TP_PROTO(unsigned long opcode, unsigned long *args), 62 TP_PROTO(unsigned long opcode, unsigned long *args),
63 63
64 TP_ARGS(opcode, args), 64 TP_ARGS(opcode, args),
65 65
66 TP_CONDITION(cpu_online(raw_smp_processor_id())),
67
66 TP_STRUCT__entry( 68 TP_STRUCT__entry(
67 __field(unsigned long, opcode) 69 __field(unsigned long, opcode)
68 ), 70 ),
@@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry,
76 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc 78 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
77); 79);
78 80
79TRACE_EVENT_FN(hcall_exit, 81TRACE_EVENT_FN_COND(hcall_exit,
80 82
81 TP_PROTO(unsigned long opcode, unsigned long retval, 83 TP_PROTO(unsigned long opcode, unsigned long retval,
82 unsigned long *retbuf), 84 unsigned long *retbuf),
83 85
84 TP_ARGS(opcode, retval, retbuf), 86 TP_ARGS(opcode, retval, retbuf),
85 87
88 TP_CONDITION(cpu_online(raw_smp_processor_id())),
89
86 TP_STRUCT__entry( 90 TP_STRUCT__entry(
87 __field(unsigned long, opcode) 91 __field(unsigned long, opcode)
88 __field(unsigned long, retval) 92 __field(unsigned long, retval)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 938742135ee0..650cfb31ea3d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
418 eeh_pcid_put(dev); 418 eeh_pcid_put(dev);
419 if (driver->err_handler && 419 if (driver->err_handler &&
420 driver->err_handler->error_detected && 420 driver->err_handler->error_detected &&
421 driver->err_handler->slot_reset && 421 driver->err_handler->slot_reset)
422 driver->err_handler->resume)
423 return NULL; 422 return NULL;
424 } 423 }
425 424
@@ -564,6 +563,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
564 */ 563 */
565 eeh_pe_state_mark(pe, EEH_PE_KEEP); 564 eeh_pe_state_mark(pe, EEH_PE_KEEP);
566 if (bus) { 565 if (bus) {
566 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
567 pci_lock_rescan_remove(); 567 pci_lock_rescan_remove();
568 pcibios_remove_pci_devices(bus); 568 pcibios_remove_pci_devices(bus);
569 pci_unlock_rescan_remove(); 569 pci_unlock_rescan_remove();
@@ -803,6 +803,7 @@ perm_error:
803 * the their PCI config any more. 803 * the their PCI config any more.
804 */ 804 */
805 if (frozen_bus) { 805 if (frozen_bus) {
806 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
806 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 807 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
807 808
808 pci_lock_rescan_remove(); 809 pci_lock_rescan_remove();
@@ -886,6 +887,7 @@ static void eeh_handle_special_event(void)
886 continue; 887 continue;
887 888
888 /* Notify all devices to be down */ 889 /* Notify all devices to be down */
890 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
889 bus = eeh_pe_bus_get(phb_pe); 891 bus = eeh_pe_bus_get(phb_pe);
890 eeh_pe_dev_traverse(pe, 892 eeh_pe_dev_traverse(pe,
891 eeh_report_failure, NULL); 893 eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ca9e5371930e..98f81800e00c 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -928,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
928 bus = pe->phb->bus; 928 bus = pe->phb->bus;
929 } else if (pe->type & EEH_PE_BUS || 929 } else if (pe->type & EEH_PE_BUS ||
930 pe->type & EEH_PE_DEVICE) { 930 pe->type & EEH_PE_DEVICE) {
931 if (pe->bus) { 931 if (pe->state & EEH_PE_PRI_BUS) {
932 bus = pe->bus; 932 bus = pe->bus;
933 goto out; 933 goto out;
934 } 934 }
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 05e804cdecaa..aec9a1b1d25b 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
109 * If the breakpoint is unregistered between a hw_breakpoint_handler() 109 * If the breakpoint is unregistered between a hw_breakpoint_handler()
110 * and the single_step_dabr_instruction(), then cleanup the breakpoint 110 * and the single_step_dabr_instruction(), then cleanup the breakpoint
111 * restoration variables to prevent dangling pointers. 111 * restoration variables to prevent dangling pointers.
112 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
112 */ 113 */
113 if (bp->ctx && bp->ctx->task) 114 if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
114 bp->ctx->task->thread.last_hit_ubp = NULL; 115 bp->ctx->task->thread.last_hit_ubp = NULL;
115} 116}
116 117
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ac64ffdb52c8..08b7a40de5f8 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -340,7 +340,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
340 if (name[0] == '.') { 340 if (name[0] == '.') {
341 if (strcmp(name+1, "TOC.") == 0) 341 if (strcmp(name+1, "TOC.") == 0)
342 syms[i].st_shndx = SHN_ABS; 342 syms[i].st_shndx = SHN_ABS;
343 memmove(name, name+1, strlen(name)); 343 syms[i].st_name++;
344 } 344 }
345 } 345 }
346 } 346 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e8fee5..3c5736e52a14 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1768,9 +1768,9 @@ static inline unsigned long brk_rnd(void)
1768 1768
1769 /* 8MB for 32bit, 1GB for 64bit */ 1769 /* 8MB for 32bit, 1GB for 64bit */
1770 if (is_32bit_task()) 1770 if (is_32bit_task())
1771 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); 1771 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
1772 else 1772 else
1773 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); 1773 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
1774 1774
1775 return rnd << PAGE_SHIFT; 1775 return rnd << PAGE_SHIFT;
1776} 1776}
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 0762c1e08c88..edb09912f0c9 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -111,7 +111,13 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
111 */ 111 */
112 if (!(old_pte & _PAGE_COMBO)) { 112 if (!(old_pte & _PAGE_COMBO)) {
113 flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); 113 flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
114 old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND; 114 /*
115 * clear the old slot details from the old and new pte.
116 * On hash insert failure we use old pte value and we don't
117 * want slot information there if we have a insert failure.
118 */
119 old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
120 new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
115 goto htab_insert_hpte; 121 goto htab_insert_hpte;
116 } 122 }
117 /* 123 /*
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 49b152b0f926..eb2accdd76fd 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -78,9 +78,19 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
78 * base page size. This is because demote_segment won't flush 78 * base page size. This is because demote_segment won't flush
79 * hash page table entries. 79 * hash page table entries.
80 */ 80 */
81 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) 81 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
82 flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K, 82 flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
83 ssize, flags); 83 ssize, flags);
84 /*
85 * With THP, we also clear the slot information with
86 * respect to all the 64K hash pte mapping the 16MB
87 * page. They are all invalid now. This make sure we
88 * don't find the slot valid when we fault with 4k
89 * base page size.
90 *
91 */
92 memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
93 }
84 } 94 }
85 95
86 valid = hpte_valid(hpte_slot_array, index); 96 valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 7e6d0880813f..83a8be791e06 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -8,6 +8,8 @@
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/hugetlb.h> 9#include <linux/hugetlb.h>
10 10
11#include <asm/mmu.h>
12
11#ifdef CONFIG_PPC_FSL_BOOK3E 13#ifdef CONFIG_PPC_FSL_BOOK3E
12#ifdef CONFIG_PPC64 14#ifdef CONFIG_PPC64
13static inline int tlb1_next(void) 15static inline int tlb1_next(void)
@@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void)
60 unsigned long tmp; 62 unsigned long tmp;
61 int token = smp_processor_id() + 1; 63 int token = smp_processor_id() + 1;
62 64
65 /*
66 * Besides being unnecessary in the absence of SMT, this
67 * check prevents trying to do lbarx/stbcx. on e5500 which
68 * doesn't implement either feature.
69 */
70 if (!cpu_has_feature(CPU_FTR_SMT))
71 return;
72
63 asm volatile("1: lbarx %0, 0, %1;" 73 asm volatile("1: lbarx %0, 0, %1;"
64 "cmpwi %0, 0;" 74 "cmpwi %0, 0;"
65 "bne 2f;" 75 "bne 2f;"
@@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void)
80{ 90{
81 struct paca_struct *paca = get_paca(); 91 struct paca_struct *paca = get_paca();
82 92
93 if (!cpu_has_feature(CPU_FTR_SMT))
94 return;
95
83 isync(); 96 isync();
84 paca->tcd_ptr->lock = 0; 97 paca->tcd_ptr->lock = 0;
85} 98}
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 0f0502e12f6c..4087705ba90f 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void)
59 59
60 /* 8MB for 32bit, 1GB for 64bit */ 60 /* 8MB for 32bit, 1GB for 64bit */
61 if (is_32bit_task()) 61 if (is_32bit_task())
62 rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT)); 62 rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
63 else 63 else
64 rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT)); 64 rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
65 65
66 return rnd << PAGE_SHIFT; 66 return rnd << PAGE_SHIFT;
67} 67}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20d0fab..cdf2123d46db 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
646 return pgtable; 646 return pgtable;
647} 647}
648 648
649void pmdp_huge_split_prepare(struct vm_area_struct *vma,
650 unsigned long address, pmd_t *pmdp)
651{
652 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
653 VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
654
655 /*
656 * We can't mark the pmd none here, because that will cause a race
657 * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
658 * we spilt, but at the same time we wan't rest of the ppc64 code
659 * not to insert hash pte on this, because we will be modifying
660 * the deposited pgtable in the caller of this function. Hence
661 * clear the _PAGE_USER so that we move the fault handling to
662 * higher level function and that will serialize against ptl.
663 * We need to flush existing hash pte entries here even though,
664 * the translation is still valid, because we will withdraw
665 * pgtable_t after this.
666 */
667 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
668}
669
670
649/* 671/*
650 * set a new huge pmd. We should not be called for updating 672 * set a new huge pmd. We should not be called for updating
651 * an existing pmd entry. That should go via pmd_hugepage_update. 673 * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,20 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
663 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); 685 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
664} 686}
665 687
688/*
689 * We use this to invalidate a pmdp entry before switching from a
690 * hugepte to regular pmd entry.
691 */
666void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 692void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
667 pmd_t *pmdp) 693 pmd_t *pmdp)
668{ 694{
669 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 695 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
696
697 /*
698 * This ensures that generic code that rely on IRQ disabling
699 * to prevent a parallel THP split work as expected.
700 */
701 kick_all_cpus_sync();
670} 702}
671 703
672/* 704/*
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5f152b95ca0c..87f47e55aab6 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
444 * PCI devices of the PE are expected to be removed prior 444 * PCI devices of the PE are expected to be removed prior
445 * to PE reset. 445 * to PE reset.
446 */ 446 */
447 if (!edev->pe->bus) 447 if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
448 edev->pe->bus = pci_find_bus(hose->global_number, 448 edev->pe->bus = pci_find_bus(hose->global_number,
449 pdn->busno); 449 pdn->busno);
450 if (edev->pe->bus)
451 edev->pe->state |= EEH_PE_PRI_BUS;
452 }
450 453
451 /* 454 /*
452 * Enable EEH explicitly so that we will do EEH check 455 * Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 573ae1994097..f90dc04395bf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3180 3180
3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { 3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3182 .dma_dev_setup = pnv_pci_dma_dev_setup, 3182 .dma_dev_setup = pnv_pci_dma_dev_setup,
3183 .dma_bus_setup = pnv_pci_dma_bus_setup,
3183#ifdef CONFIG_PCI_MSI 3184#ifdef CONFIG_PCI_MSI
3184 .setup_msi_irqs = pnv_setup_msi_irqs, 3185 .setup_msi_irqs = pnv_setup_msi_irqs,
3185 .teardown_msi_irqs = pnv_teardown_msi_irqs, 3186 .teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2f55c86df703..b1ef84a6c9d1 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -599,6 +599,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift; 599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
600 long i; 600 long i;
601 601
602 if (proto_tce & TCE_PCI_WRITE)
603 proto_tce |= TCE_PCI_READ;
604
602 for (i = 0; i < npages; i++) { 605 for (i = 0; i < npages; i++) {
603 unsigned long newtce = proto_tce | 606 unsigned long newtce = proto_tce |
604 ((rpn + i) << tbl->it_page_shift); 607 ((rpn + i) << tbl->it_page_shift);
@@ -620,6 +623,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
620 623
621 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl)); 624 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
622 625
626 if (newtce & TCE_PCI_WRITE)
627 newtce |= TCE_PCI_READ;
628
623 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)); 629 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
624 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE); 630 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
625 *direction = iommu_tce_direction(oldtce); 631 *direction = iommu_tce_direction(oldtce);
@@ -760,6 +766,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
760 phb->dma_dev_setup(phb, pdev); 766 phb->dma_dev_setup(phb, pdev);
761} 767}
762 768
769void pnv_pci_dma_bus_setup(struct pci_bus *bus)
770{
771 struct pci_controller *hose = bus->sysdata;
772 struct pnv_phb *phb = hose->private_data;
773 struct pnv_ioda_pe *pe;
774
775 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
776 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
777 continue;
778
779 if (!pe->pbus)
780 continue;
781
782 if (bus->number == ((pe->rid >> 8) & 0xFF)) {
783 pe->pbus = bus;
784 break;
785 }
786 }
787}
788
763void pnv_pci_shutdown(void) 789void pnv_pci_shutdown(void)
764{ 790{
765 struct pci_controller *hose; 791 struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7f56313e8d72..00691a9b99af 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); 242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
243 243
244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev); 244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
245extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
245extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); 246extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
246extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); 247extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
247 248
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index ea91ddfe54eb..629c90865a07 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -40,6 +40,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
40static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) 40static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
41{ 41{
42 fpregs->pad = 0; 42 fpregs->pad = 0;
43 fpregs->fpc = fpu->fpc;
43 if (MACHINE_HAS_VX) 44 if (MACHINE_HAS_VX)
44 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); 45 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
45 else 46 else
@@ -49,6 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
49 50
50static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) 51static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
51{ 52{
53 fpu->fpc = fpregs->fpc;
52 if (MACHINE_HAS_VX) 54 if (MACHINE_HAS_VX)
53 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); 55 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
54 else 56 else
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 7aa799134a11..a52b6cca873d 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -37,7 +37,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
37 regs->psw.addr = ip; 37 regs->psw.addr = ip;
38} 38}
39#else 39#else
40#error Live patching support is disabled; check CONFIG_LIVEPATCH 40#error Include linux/livepatch.h, not asm/livepatch.h
41#endif 41#endif
42 42
43#endif 43#endif
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 66c94417c0ba..4af60374eba0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
271 271
272 /* Restore high gprs from signal stack */ 272 /* Restore high gprs from signal stack */
273 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, 273 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
274 sizeof(&sregs_ext->gprs_high))) 274 sizeof(sregs_ext->gprs_high)))
275 return -EFAULT; 275 return -EFAULT;
276 for (i = 0; i < NUM_GPRS; i++) 276 for (i = 0; i < NUM_GPRS; i++)
277 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 277 *(__u32 *)&regs->gprs[i] = gprs_high[i];
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index cfcba2dd9bb5..0943b11a2f6e 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -260,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
260void perf_callchain_kernel(struct perf_callchain_entry *entry, 260void perf_callchain_kernel(struct perf_callchain_entry *entry,
261 struct pt_regs *regs) 261 struct pt_regs *regs)
262{ 262{
263 unsigned long head; 263 unsigned long head, frame_size;
264 struct stack_frame *head_sf; 264 struct stack_frame *head_sf;
265 265
266 if (user_mode(regs)) 266 if (user_mode(regs))
267 return; 267 return;
268 268
269 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
269 head = regs->gprs[15]; 270 head = regs->gprs[15];
270 head_sf = (struct stack_frame *) head; 271 head_sf = (struct stack_frame *) head;
271 272
@@ -273,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
273 return; 274 return;
274 275
275 head = head_sf->back_chain; 276 head = head_sf->back_chain;
276 head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, 277 head = __store_trace(entry, head,
277 S390_lowcore.async_stack); 278 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
279 S390_lowcore.async_stack + frame_size);
278 280
279 __store_trace(entry, head, S390_lowcore.thread_info, 281 __store_trace(entry, head, S390_lowcore.thread_info,
280 S390_lowcore.thread_info + THREAD_SIZE); 282 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 5acba3cb7220..8f64ebd63767 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -59,26 +59,32 @@ static unsigned long save_context_stack(struct stack_trace *trace,
59 } 59 }
60} 60}
61 61
62void save_stack_trace(struct stack_trace *trace) 62static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
63{ 63{
64 register unsigned long sp asm ("15"); 64 unsigned long new_sp, frame_size;
65 unsigned long orig_sp, new_sp;
66 65
67 orig_sp = sp; 66 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
68 new_sp = save_context_stack(trace, orig_sp, 67 new_sp = save_context_stack(trace, sp,
69 S390_lowcore.panic_stack - PAGE_SIZE, 68 S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
70 S390_lowcore.panic_stack, 1); 69 S390_lowcore.panic_stack + frame_size, 1);
71 if (new_sp != orig_sp)
72 return;
73 new_sp = save_context_stack(trace, new_sp, 70 new_sp = save_context_stack(trace, new_sp,
74 S390_lowcore.async_stack - ASYNC_SIZE, 71 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
75 S390_lowcore.async_stack, 1); 72 S390_lowcore.async_stack + frame_size, 1);
76 if (new_sp != orig_sp)
77 return;
78 save_context_stack(trace, new_sp, 73 save_context_stack(trace, new_sp,
79 S390_lowcore.thread_info, 74 S390_lowcore.thread_info,
80 S390_lowcore.thread_info + THREAD_SIZE, 1); 75 S390_lowcore.thread_info + THREAD_SIZE, 1);
81} 76}
77
78void save_stack_trace(struct stack_trace *trace)
79{
80 register unsigned long r15 asm ("15");
81 unsigned long sp;
82
83 sp = r15;
84 __save_stack_trace(trace, sp);
85 if (trace->nr_entries < trace->max_entries)
86 trace->entries[trace->nr_entries++] = ULONG_MAX;
87}
82EXPORT_SYMBOL_GPL(save_stack_trace); 88EXPORT_SYMBOL_GPL(save_stack_trace);
83 89
84void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 90void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -86,6 +92,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
86 unsigned long sp, low, high; 92 unsigned long sp, low, high;
87 93
88 sp = tsk->thread.ksp; 94 sp = tsk->thread.ksp;
95 if (tsk == current) {
96 /* Get current stack pointer. */
97 asm volatile("la %0,0(15)" : "=a" (sp));
98 }
89 low = (unsigned long) task_stack_page(tsk); 99 low = (unsigned long) task_stack_page(tsk);
90 high = (unsigned long) task_pt_regs(tsk); 100 high = (unsigned long) task_pt_regs(tsk);
91 save_context_stack(trace, sp, low, high, 0); 101 save_context_stack(trace, sp, low, high, 0);
@@ -93,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
93 trace->entries[trace->nr_entries++] = ULONG_MAX; 103 trace->entries[trace->nr_entries++] = ULONG_MAX;
94} 104}
95EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 105EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
106
107void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
108{
109 unsigned long sp;
110
111 sp = kernel_stack_pointer(regs);
112 __save_stack_trace(trace, sp);
113 if (trace->nr_entries < trace->max_entries)
114 trace->entries[trace->nr_entries++] = ULONG_MAX;
115}
116EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df99552b..dde7654f5c68 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@ void trace_s390_diagnose_norecursion(int diag_nr)
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
20 20
21 /* Avoid lockdep recursion. */
22 if (IS_ENABLED(CONFIG_LOCKDEP))
23 return;
21 local_irq_save(flags); 24 local_irq_save(flags);
22 depth = this_cpu_ptr(&diagnose_trace_depth); 25 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 26 if (*depth == 0) {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index fec59c067d0d..792f9c63fbca 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -93,15 +93,19 @@ static int __memcpy_real(void *dest, void *src, size_t count)
93 */ 93 */
94int memcpy_real(void *dest, void *src, size_t count) 94int memcpy_real(void *dest, void *src, size_t count)
95{ 95{
96 int irqs_disabled, rc;
96 unsigned long flags; 97 unsigned long flags;
97 int rc;
98 98
99 if (!count) 99 if (!count)
100 return 0; 100 return 0;
101 local_irq_save(flags); 101 flags = __arch_local_irq_stnsm(0xf8UL);
102 __arch_local_irq_stnsm(0xfbUL); 102 irqs_disabled = arch_irqs_disabled_flags(flags);
103 if (!irqs_disabled)
104 trace_hardirqs_off();
103 rc = __memcpy_real(dest, src, count); 105 rc = __memcpy_real(dest, src, count);
104 local_irq_restore(flags); 106 if (!irqs_disabled)
107 trace_hardirqs_on();
108 __arch_local_irq_ssm(flags);
105 return rc; 109 return rc;
106} 110}
107 111
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index fe0bfe370c45..1884e1759529 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -54,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp,
54 54
55void s390_backtrace(struct pt_regs * const regs, unsigned int depth) 55void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
56{ 56{
57 unsigned long head; 57 unsigned long head, frame_size;
58 struct stack_frame* head_sf; 58 struct stack_frame* head_sf;
59 59
60 if (user_mode(regs)) 60 if (user_mode(regs))
61 return; 61 return;
62 62
63 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
63 head = regs->gprs[15]; 64 head = regs->gprs[15];
64 head_sf = (struct stack_frame*)head; 65 head_sf = (struct stack_frame*)head;
65 66
@@ -68,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
68 69
69 head = head_sf->back_chain; 70 head = head_sf->back_chain;
70 71
71 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, 72 head = __show_trace(&depth, head,
72 S390_lowcore.async_stack); 73 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
74 S390_lowcore.async_stack + frame_size);
73 75
74 __show_trace(&depth, head, S390_lowcore.thread_info, 76 __show_trace(&depth, head, S390_lowcore.thread_info,
75 S390_lowcore.thread_info + THREAD_SIZE); 77 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index eaee14637d93..8496a074bd0e 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -24,7 +24,13 @@ LDFLAGS := -m elf32_sparc
24export BITS := 32 24export BITS := 32
25UTS_MACHINE := sparc 25UTS_MACHINE := sparc
26 26
27# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some
28# versions of gcc. Some gcc versions won't pass -Av8 to binutils when you
29# give -mcpu=v8. This silently worked with older bintutils versions but
30# does not any more.
27KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 31KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
32KBUILD_CFLAGS += -Wa,-Av8
33
28KBUILD_AFLAGS += -m32 -Wa,-Av8 34KBUILD_AFLAGS += -m32 -Wa,-Av8
29 35
30else 36else
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 1c26d440d288..b6de8b10a55b 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -422,8 +422,9 @@
422#define __NR_listen 354 422#define __NR_listen 354
423#define __NR_setsockopt 355 423#define __NR_setsockopt 355
424#define __NR_mlock2 356 424#define __NR_mlock2 356
425#define __NR_copy_file_range 357
425 426
426#define NR_syscalls 357 427#define NR_syscalls 358
427 428
428/* Bitmask values returned from kern_features system call. */ 429/* Bitmask values returned from kern_features system call. */
429#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 430#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 33c02b15f478..a83707c83be8 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -948,7 +948,24 @@ linux_syscall_trace:
948 cmp %o0, 0 948 cmp %o0, 0
949 bne 3f 949 bne 3f
950 mov -ENOSYS, %o0 950 mov -ENOSYS, %o0
951
952 /* Syscall tracing can modify the registers. */
953 ld [%sp + STACKFRAME_SZ + PT_G1], %g1
954 sethi %hi(sys_call_table), %l7
955 ld [%sp + STACKFRAME_SZ + PT_I0], %i0
956 or %l7, %lo(sys_call_table), %l7
957 ld [%sp + STACKFRAME_SZ + PT_I1], %i1
958 ld [%sp + STACKFRAME_SZ + PT_I2], %i2
959 ld [%sp + STACKFRAME_SZ + PT_I3], %i3
960 ld [%sp + STACKFRAME_SZ + PT_I4], %i4
961 ld [%sp + STACKFRAME_SZ + PT_I5], %i5
962 cmp %g1, NR_syscalls
963 bgeu 3f
964 mov -ENOSYS, %o0
965
966 sll %g1, 2, %l4
951 mov %i0, %o0 967 mov %i0, %o0
968 ld [%l7 + %l4], %l7
952 mov %i1, %o1 969 mov %i1, %o1
953 mov %i2, %o2 970 mov %i2, %o2
954 mov %i3, %o3 971 mov %i3, %o3
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index afbaba52d2f1..d127130bf424 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
338 mov %o1, %o4 338 mov %o1, %o4
339 mov HV_FAST_MACH_SET_WATCHDOG, %o5 339 mov HV_FAST_MACH_SET_WATCHDOG, %o5
340 ta HV_FAST_TRAP 340 ta HV_FAST_TRAP
341 brnz,a,pn %o4, 0f
341 stx %o1, [%o4] 342 stx %o1, [%o4]
342 retl 3430: retl
343 nop 344 nop
344ENDPROC(sun4v_mach_set_watchdog) 345ENDPROC(sun4v_mach_set_watchdog)
345 346
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index d88beff47bab..39aaec173f66 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
52 unsigned char fenab; 52 unsigned char fenab;
53 int err; 53 int err;
54 54
55 flush_user_windows(); 55 synchronize_user_stack();
56 if (get_thread_wsaved() || 56 if (get_thread_wsaved() ||
57 (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || 57 (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
58 (!__access_ok(ucp, sizeof(*ucp)))) 58 (!__access_ok(ucp, sizeof(*ucp))))
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index a92d5d2c46a3..9e034f29dcc5 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
37EXPORT_SYMBOL(sun4v_niagara_setperf); 37EXPORT_SYMBOL(sun4v_niagara_setperf);
38EXPORT_SYMBOL(sun4v_niagara2_getperf); 38EXPORT_SYMBOL(sun4v_niagara2_getperf);
39EXPORT_SYMBOL(sun4v_niagara2_setperf); 39EXPORT_SYMBOL(sun4v_niagara2_setperf);
40EXPORT_SYMBOL(sun4v_mach_set_watchdog);
40 41
41/* from hweight.S */ 42/* from hweight.S */
42EXPORT_SYMBOL(__arch_hweight8); 43EXPORT_SYMBOL(__arch_hweight8);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c690c8e16a96..b489e9759518 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void)
264 unsigned long rnd = 0UL; 264 unsigned long rnd = 0UL;
265 265
266 if (current->flags & PF_RANDOMIZE) { 266 if (current->flags & PF_RANDOMIZE) {
267 unsigned long val = get_random_int(); 267 unsigned long val = get_random_long();
268 if (test_thread_flag(TIF_32BIT)) 268 if (test_thread_flag(TIF_32BIT))
269 rnd = (val % (1UL << (23UL-PAGE_SHIFT))); 269 rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
270 else 270 else
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index bb0008927598..c4a1b5c40e4e 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -158,7 +158,25 @@ linux_syscall_trace32:
158 add %sp, PTREGS_OFF, %o0 158 add %sp, PTREGS_OFF, %o0
159 brnz,pn %o0, 3f 159 brnz,pn %o0, 3f
160 mov -ENOSYS, %o0 160 mov -ENOSYS, %o0
161
162 /* Syscall tracing can modify the registers. */
163 ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
164 sethi %hi(sys_call_table32), %l7
165 ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
166 or %l7, %lo(sys_call_table32), %l7
167 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
168 ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
169 ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
170 ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
171 ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
172
173 cmp %g1, NR_syscalls
174 bgeu,pn %xcc, 3f
175 mov -ENOSYS, %o0
176
177 sll %g1, 2, %l4
161 srl %i0, 0, %o0 178 srl %i0, 0, %o0
179 lduw [%l7 + %l4], %l7
162 srl %i4, 0, %o4 180 srl %i4, 0, %o4
163 srl %i1, 0, %o1 181 srl %i1, 0, %o1
164 srl %i2, 0, %o2 182 srl %i2, 0, %o2
@@ -170,7 +188,25 @@ linux_syscall_trace:
170 add %sp, PTREGS_OFF, %o0 188 add %sp, PTREGS_OFF, %o0
171 brnz,pn %o0, 3f 189 brnz,pn %o0, 3f
172 mov -ENOSYS, %o0 190 mov -ENOSYS, %o0
191
192 /* Syscall tracing can modify the registers. */
193 ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
194 sethi %hi(sys_call_table64), %l7
195 ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
196 or %l7, %lo(sys_call_table64), %l7
197 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
198 ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
199 ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
200 ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
201 ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
202
203 cmp %g1, NR_syscalls
204 bgeu,pn %xcc, 3f
205 mov -ENOSYS, %o0
206
207 sll %g1, 2, %l4
173 mov %i0, %o0 208 mov %i0, %o0
209 lduw [%l7 + %l4], %l7
174 mov %i1, %o1 210 mov %i1, %o1
175 mov %i2, %o2 211 mov %i2, %o2
176 mov %i3, %o3 212 mov %i3, %o3
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index e663b6c78de2..6c3dd6c52f8b 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -88,4 +88,4 @@ sys_call_table:
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
90/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 90/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
91/*355*/ .long sys_setsockopt, sys_mlock2 91/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 1557121f4cdc..12b524cfcfa0 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -89,7 +89,7 @@ sys_call_table32:
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
91/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 91/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
92 .word compat_sys_setsockopt, sys_mlock2 92 .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
93 93
94#endif /* CONFIG_COMPAT */ 94#endif /* CONFIG_COMPAT */
95 95
@@ -170,4 +170,4 @@ sys_call_table:
170/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 170/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
171 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 171 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
172/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 172/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
173 .word sys_setsockopt, sys_mlock2 173 .word sys_setsockopt, sys_mlock2, sys_copy_file_range
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 9bdf67a092a5..b60a9f8cda75 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -12,6 +12,7 @@
12#include <skas.h> 12#include <skas.h>
13 13
14void (*pm_power_off)(void); 14void (*pm_power_off)(void);
15EXPORT_SYMBOL(pm_power_off);
15 16
16static void kill_off_processes(void) 17static void kill_off_processes(void)
17{ 18{
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index fc8be0e3a4ff..57acbd67d85d 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
69 struct ksignal ksig; 69 struct ksignal ksig;
70 int handled_sig = 0; 70 int handled_sig = 0;
71 71
72 if (get_signal(&ksig)) { 72 while (get_signal(&ksig)) {
73 handled_sig = 1; 73 handled_sig = 1;
74 /* Whee! Actually deliver the signal. */ 74 /* Whee! Actually deliver the signal. */
75 handle_signal(&ksig, regs); 75 handle_signal(&ksig, regs);
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4c5228352744..66350e6a6ca5 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@ sysenter_past_esp:
294 pushl $__USER_DS /* pt_regs->ss */ 294 pushl $__USER_DS /* pt_regs->ss */
295 pushl %ebp /* pt_regs->sp (stashed in bp) */ 295 pushl %ebp /* pt_regs->sp (stashed in bp) */
296 pushfl /* pt_regs->flags (except IF = 0) */ 296 pushfl /* pt_regs->flags (except IF = 0) */
297 ASM_CLAC /* Clear AC after saving FLAGS */
297 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 298 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
298 pushl $__USER_CS /* pt_regs->cs */ 299 pushl $__USER_CS /* pt_regs->cs */
299 pushl $0 /* pt_regs->ip = 0 (placeholder) */ 300 pushl $0 /* pt_regs->ip = 0 (placeholder) */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d61f332..3c990eeee40b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
261 * Interrupts are off on entry. 261 * Interrupts are off on entry.
262 */ 262 */
263 PARAVIRT_ADJUST_EXCEPTION_FRAME 263 PARAVIRT_ADJUST_EXCEPTION_FRAME
264 ASM_CLAC /* Do this early to minimize exposure */
264 SWAPGS 265 SWAPGS
265 266
266 /* 267 /*
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 19c099afa861..e795f5274217 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -41,7 +41,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
41 regs->ip = ip; 41 regs->ip = ip;
42} 42}
43#else 43#else
44#error Live patching support is disabled; check CONFIG_LIVEPATCH 44#error Include linux/livepatch.h, not asm/livepatch.h
45#endif 45#endif
46 46
47#endif /* _ASM_X86_LIVEPATCH_H */ 47#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 46873fbd44e1..d08eacd298c2 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
93extern int (*pcibios_enable_irq)(struct pci_dev *dev); 93extern int (*pcibios_enable_irq)(struct pci_dev *dev);
94extern void (*pcibios_disable_irq)(struct pci_dev *dev); 94extern void (*pcibios_disable_irq)(struct pci_dev *dev);
95 95
96extern bool mp_should_keep_irq(struct device *dev);
97
96struct pci_raw_ops { 98struct pci_raw_ops {
97 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, 99 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
98 int reg, int len, u32 *val); 100 int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..3fe0eac59462 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
48 48
49 switch (n) { 49 switch (n) {
50 case 1: 50 case 1:
51 __uaccess_begin();
51 __put_user_size(*(u8 *)from, (u8 __user *)to, 52 __put_user_size(*(u8 *)from, (u8 __user *)to,
52 1, ret, 1); 53 1, ret, 1);
54 __uaccess_end();
53 return ret; 55 return ret;
54 case 2: 56 case 2:
57 __uaccess_begin();
55 __put_user_size(*(u16 *)from, (u16 __user *)to, 58 __put_user_size(*(u16 *)from, (u16 __user *)to,
56 2, ret, 2); 59 2, ret, 2);
60 __uaccess_end();
57 return ret; 61 return ret;
58 case 4: 62 case 4:
63 __uaccess_begin();
59 __put_user_size(*(u32 *)from, (u32 __user *)to, 64 __put_user_size(*(u32 *)from, (u32 __user *)to,
60 4, ret, 4); 65 4, ret, 4);
66 __uaccess_end();
61 return ret; 67 return ret;
62 case 8: 68 case 8:
69 __uaccess_begin();
63 __put_user_size(*(u64 *)from, (u64 __user *)to, 70 __put_user_size(*(u64 *)from, (u64 __user *)to,
64 8, ret, 8); 71 8, ret, 8);
72 __uaccess_end();
65 return ret; 73 return ret;
66 } 74 }
67 } 75 }
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
103 111
104 switch (n) { 112 switch (n) {
105 case 1: 113 case 1:
114 __uaccess_begin();
106 __get_user_size(*(u8 *)to, from, 1, ret, 1); 115 __get_user_size(*(u8 *)to, from, 1, ret, 1);
116 __uaccess_end();
107 return ret; 117 return ret;
108 case 2: 118 case 2:
119 __uaccess_begin();
109 __get_user_size(*(u16 *)to, from, 2, ret, 2); 120 __get_user_size(*(u16 *)to, from, 2, ret, 2);
121 __uaccess_end();
110 return ret; 122 return ret;
111 case 4: 123 case 4:
124 __uaccess_begin();
112 __get_user_size(*(u32 *)to, from, 4, ret, 4); 125 __get_user_size(*(u32 *)to, from, 4, ret, 4);
126 __uaccess_end();
113 return ret; 127 return ret;
114 } 128 }
115 } 129 }
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
148 162
149 switch (n) { 163 switch (n) {
150 case 1: 164 case 1:
165 __uaccess_begin();
151 __get_user_size(*(u8 *)to, from, 1, ret, 1); 166 __get_user_size(*(u8 *)to, from, 1, ret, 1);
167 __uaccess_end();
152 return ret; 168 return ret;
153 case 2: 169 case 2:
170 __uaccess_begin();
154 __get_user_size(*(u16 *)to, from, 2, ret, 2); 171 __get_user_size(*(u16 *)to, from, 2, ret, 2);
172 __uaccess_end();
155 return ret; 173 return ret;
156 case 4: 174 case 4:
175 __uaccess_begin();
157 __get_user_size(*(u32 *)to, from, 4, ret, 4); 176 __get_user_size(*(u32 *)to, from, 4, ret, 4);
177 __uaccess_end();
158 return ret; 178 return ret;
159 } 179 }
160 } 180 }
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
170 190
171 switch (n) { 191 switch (n) {
172 case 1: 192 case 1:
193 __uaccess_begin();
173 __get_user_size(*(u8 *)to, from, 1, ret, 1); 194 __get_user_size(*(u8 *)to, from, 1, ret, 1);
195 __uaccess_end();
174 return ret; 196 return ret;
175 case 2: 197 case 2:
198 __uaccess_begin();
176 __get_user_size(*(u16 *)to, from, 2, ret, 2); 199 __get_user_size(*(u16 *)to, from, 2, ret, 2);
200 __uaccess_end();
177 return ret; 201 return ret;
178 case 4: 202 case 4:
203 __uaccess_begin();
179 __get_user_size(*(u32 *)to, from, 4, ret, 4); 204 __get_user_size(*(u32 *)to, from, 4, ret, 4);
205 __uaccess_end();
180 return ret; 206 return ret;
181 } 207 }
182 } 208 }
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 968d57dd54c9..f320ee32d5a1 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
57{ 57{
58 if (xen_pci_frontend && xen_pci_frontend->enable_msi) 58 if (xen_pci_frontend && xen_pci_frontend->enable_msi)
59 return xen_pci_frontend->enable_msi(dev, vectors); 59 return xen_pci_frontend->enable_msi(dev, vectors);
60 return -ENODEV; 60 return -ENOSYS;
61} 61}
62static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) 62static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
63{ 63{
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
69{ 69{
70 if (xen_pci_frontend && xen_pci_frontend->enable_msix) 70 if (xen_pci_frontend && xen_pci_frontend->enable_msix)
71 return xen_pci_frontend->enable_msix(dev, vectors, nvec); 71 return xen_pci_frontend->enable_msix(dev, vectors, nvec);
72 return -ENODEV; 72 return -ENOSYS;
73} 73}
74static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev) 74static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
75{ 75{
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index d1daead5fcdd..adb3eaf8fe2a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/realmode.h> 17#include <asm/realmode.h>
18 18
19#include <linux/ftrace.h>
19#include "../../realmode/rm/wakeup.h" 20#include "../../realmode/rm/wakeup.h"
20#include "sleep.h" 21#include "sleep.h"
21 22
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
107 saved_magic = 0x123456789abcdef0L; 108 saved_magic = 0x123456789abcdef0L;
108#endif /* CONFIG_64BIT */ 109#endif /* CONFIG_64BIT */
109 110
111 /*
112 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
113 * inconsistent call/return info after it jumps to the wakeup vector.
114 */
115 pause_graph_tracing();
110 do_suspend_lowlevel(); 116 do_suspend_lowlevel();
117 unpause_graph_tracing();
111 return 0; 118 return 0;
112} 119}
113 120
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 49742746a6c9..8836fc9fa84b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -323,6 +323,8 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
323 return 0; 323 return 0;
324 324
325fail: 325fail:
326 if (amd_uncore_nb)
327 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
326 kfree(uncore_nb); 328 kfree(uncore_nb);
327 return -ENOMEM; 329 return -ENOMEM;
328} 330}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1505587d06e9..b9b09fec173b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
650 u16 sel; 650 u16 sel;
651 651
652 la = seg_base(ctxt, addr.seg) + addr.ea; 652 la = seg_base(ctxt, addr.seg) + addr.ea;
653 *linear = la;
654 *max_size = 0; 653 *max_size = 0;
655 switch (mode) { 654 switch (mode) {
656 case X86EMUL_MODE_PROT64: 655 case X86EMUL_MODE_PROT64:
656 *linear = la;
657 if (is_noncanonical_address(la)) 657 if (is_noncanonical_address(la))
658 goto bad; 658 goto bad;
659 659
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
662 goto bad; 662 goto bad;
663 break; 663 break;
664 default: 664 default:
665 *linear = la = (u32)la;
665 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 666 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
666 addr.seg); 667 addr.seg);
667 if (!usable) 668 if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
689 if (size > *max_size) 690 if (size > *max_size)
690 goto bad; 691 goto bad;
691 } 692 }
692 la &= (u32)-1;
693 break; 693 break;
694 } 694 }
695 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) 695 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c9fed957cce..2ce4f05e81d3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
249 return ret; 249 return ret;
250 250
251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn); 251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
252 walker->ptes[level] = pte; 252 walker->ptes[level - 1] = pte;
253 } 253 }
254 return 0; 254 return 0;
255} 255}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e2951b6edbbc..0ff453749a90 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -596,6 +596,8 @@ struct vcpu_vmx {
596 /* Support for PML */ 596 /* Support for PML */
597#define PML_ENTITY_NUM 512 597#define PML_ENTITY_NUM 512
598 struct page *pml_pg; 598 struct page *pml_pg;
599
600 u64 current_tsc_ratio;
599}; 601};
600 602
601enum segment_cache_field { 603enum segment_cache_field {
@@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2127 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 2129 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2128 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 2130 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2129 2131
2130 /* Setup TSC multiplier */
2131 if (cpu_has_vmx_tsc_scaling())
2132 vmcs_write64(TSC_MULTIPLIER,
2133 vcpu->arch.tsc_scaling_ratio);
2134
2135 vmx->loaded_vmcs->cpu = cpu; 2132 vmx->loaded_vmcs->cpu = cpu;
2136 } 2133 }
2137 2134
2135 /* Setup TSC multiplier */
2136 if (kvm_has_tsc_control &&
2137 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
2138 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2139 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2140 }
2141
2138 vmx_vcpu_pi_load(vcpu, cpu); 2142 vmx_vcpu_pi_load(vcpu, cpu);
2139} 2143}
2140 2144
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4244c2baf57d..eaf6ee8c28b8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6618,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6618 * KVM_DEBUGREG_WONT_EXIT again. 6618 * KVM_DEBUGREG_WONT_EXIT again.
6619 */ 6619 */
6620 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 6620 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
6621 int i;
6622
6623 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 6621 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
6624 kvm_x86_ops->sync_dirty_debug_regs(vcpu); 6622 kvm_x86_ops->sync_dirty_debug_regs(vcpu);
6625 for (i = 0; i < KVM_NR_DB_REGS; i++) 6623 kvm_update_dr0123(vcpu);
6626 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 6624 kvm_update_dr6(vcpu);
6625 kvm_update_dr7(vcpu);
6626 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6627 } 6627 }
6628 6628
6629 /* 6629 /*
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb5900372..d8a798d8bf50 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
102 return 0; 102 return 0;
103 } 103 }
104 104
105 page = pte_page(pte);
106 if (pte_devmap(pte)) { 105 if (pte_devmap(pte)) {
107 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 106 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
108 if (unlikely(!pgmap)) { 107 if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
115 return 0; 114 return 0;
116 } 115 }
117 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 116 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
117 page = pte_page(pte);
118 get_page(page); 118 get_page(page);
119 put_dev_pagemap(pgmap); 119 put_dev_pagemap(pgmap);
120 SetPageReferenced(page); 120 SetPageReferenced(page);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 96bd1e2bffaf..72bb52f93c3d 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -71,12 +71,12 @@ unsigned long arch_mmap_rnd(void)
71 71
72 if (mmap_is_ia32()) 72 if (mmap_is_ia32())
73#ifdef CONFIG_COMPAT 73#ifdef CONFIG_COMPAT
74 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); 74 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
75#else 75#else
76 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 76 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
77#endif 77#endif
78 else 78 else
79 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 79 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
80 80
81 return rnd << PAGE_SHIFT; 81 return rnd << PAGE_SHIFT;
82} 82}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
123 break; 123 break;
124 } 124 }
125 125
126 if (regno > nr_registers) { 126 if (regno >= nr_registers) {
127 WARN_ONCE(1, "decoded an instruction with an invalid register"); 127 WARN_ONCE(1, "decoded an instruction with an invalid register");
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814b0069..9cf96d82147a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
419phys_addr_t slow_virt_to_phys(void *__virt_addr) 419phys_addr_t slow_virt_to_phys(void *__virt_addr)
420{ 420{
421 unsigned long virt_addr = (unsigned long)__virt_addr; 421 unsigned long virt_addr = (unsigned long)__virt_addr;
422 unsigned long phys_addr, offset; 422 phys_addr_t phys_addr;
423 unsigned long offset;
423 enum pg_level level; 424 enum pg_level level;
424 pte_t *pte; 425 pte_t *pte;
425 426
426 pte = lookup_address(virt_addr, &level); 427 pte = lookup_address(virt_addr, &level);
427 BUG_ON(!pte); 428 BUG_ON(!pte);
428 429
430 /*
431 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
432 * before being left-shifted PAGE_SHIFT bits -- this trick is to
433 * make 32-PAE kernel work correctly.
434 */
429 switch (level) { 435 switch (level) {
430 case PG_LEVEL_1G: 436 case PG_LEVEL_1G:
431 phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; 437 phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
432 offset = virt_addr & ~PUD_PAGE_MASK; 438 offset = virt_addr & ~PUD_PAGE_MASK;
433 break; 439 break;
434 case PG_LEVEL_2M: 440 case PG_LEVEL_2M:
435 phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; 441 phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
436 offset = virt_addr & ~PMD_PAGE_MASK; 442 offset = virt_addr & ~PMD_PAGE_MASK;
437 break; 443 break;
438 default: 444 default:
439 phys_addr = pte_pfn(*pte) << PAGE_SHIFT; 445 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
440 offset = virt_addr & ~PAGE_MASK; 446 offset = virt_addr & ~PAGE_MASK;
441 } 447 }
442 448
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2879efc73a96..d34b5118b4e8 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -711,28 +711,22 @@ int pcibios_add_device(struct pci_dev *dev)
711 return 0; 711 return 0;
712} 712}
713 713
714int pcibios_alloc_irq(struct pci_dev *dev) 714int pcibios_enable_device(struct pci_dev *dev, int mask)
715{ 715{
716 /* 716 int err;
717 * If the PCI device was already claimed by core code and has
718 * MSI enabled, probing of the pcibios IRQ will overwrite
719 * dev->irq. So bail out if MSI is already enabled.
720 */
721 if (pci_dev_msi_enabled(dev))
722 return -EBUSY;
723 717
724 return pcibios_enable_irq(dev); 718 if ((err = pci_enable_resources(dev, mask)) < 0)
725} 719 return err;
726 720
727void pcibios_free_irq(struct pci_dev *dev) 721 if (!pci_dev_msi_enabled(dev))
728{ 722 return pcibios_enable_irq(dev);
729 if (pcibios_disable_irq) 723 return 0;
730 pcibios_disable_irq(dev);
731} 724}
732 725
733int pcibios_enable_device(struct pci_dev *dev, int mask) 726void pcibios_disable_device (struct pci_dev *dev)
734{ 727{
735 return pci_enable_resources(dev, mask); 728 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
729 pcibios_disable_irq(dev);
736} 730}
737 731
738int pci_ext_cfg_avail(void) 732int pci_ext_cfg_avail(void)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 0d24e7c10145..8b93e634af84 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
215 int polarity; 215 int polarity;
216 int ret; 216 int ret;
217 217
218 if (pci_has_managed_irq(dev)) 218 if (dev->irq_managed && dev->irq > 0)
219 return 0; 219 return 0;
220 220
221 switch (intel_mid_identify_cpu()) { 221 switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
256 256
257static void intel_mid_pci_irq_disable(struct pci_dev *dev) 257static void intel_mid_pci_irq_disable(struct pci_dev *dev)
258{ 258{
259 if (pci_has_managed_irq(dev)) { 259 if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
260 dev->irq > 0) {
260 mp_unmap_irq(dev->irq); 261 mp_unmap_irq(dev->irq);
261 dev->irq_managed = 0; 262 dev->irq_managed = 0;
262 /*
263 * Don't reset dev->irq here, otherwise
264 * intel_mid_pci_irq_enable() will fail on next call.
265 */
266 } 263 }
267} 264}
268 265
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 32e70343e6fd..9bd115484745 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
1202 struct pci_dev *temp_dev; 1202 struct pci_dev *temp_dev;
1203 int irq; 1203 int irq;
1204 1204
1205 if (pci_has_managed_irq(dev)) 1205 if (dev->irq_managed && dev->irq > 0)
1206 return 0; 1206 return 0;
1207 1207
1208 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1208 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
1230 } 1230 }
1231 dev = temp_dev; 1231 dev = temp_dev;
1232 if (irq >= 0) { 1232 if (irq >= 0) {
1233 pci_set_managed_irq(dev, irq); 1233 dev->irq_managed = 1;
1234 dev->irq = irq;
1234 dev_info(&dev->dev, "PCI->APIC IRQ transform: " 1235 dev_info(&dev->dev, "PCI->APIC IRQ transform: "
1235 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); 1236 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
1236 return 0; 1237 return 0;
@@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
1256 return 0; 1257 return 0;
1257} 1258}
1258 1259
1260bool mp_should_keep_irq(struct device *dev)
1261{
1262 if (dev->power.is_prepared)
1263 return true;
1264#ifdef CONFIG_PM
1265 if (dev->power.runtime_status == RPM_SUSPENDING)
1266 return true;
1267#endif
1268
1269 return false;
1270}
1271
1259static void pirq_disable_irq(struct pci_dev *dev) 1272static void pirq_disable_irq(struct pci_dev *dev)
1260{ 1273{
1261 if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) { 1274 if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
1275 dev->irq_managed && dev->irq) {
1262 mp_unmap_irq(dev->irq); 1276 mp_unmap_irq(dev->irq);
1263 pci_reset_managed_irq(dev); 1277 dev->irq = 0;
1278 dev->irq_managed = 0;
1264 } 1279 }
1265} 1280}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index ff31ab464213..beac4dfdade6 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
196 return 0; 196 return 0;
197 197
198error: 198error:
199 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); 199 if (ret == -ENOSYS)
200 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
201 else if (ret)
202 dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
200free: 203free:
201 kfree(v); 204 kfree(v);
202 return ret; 205 return ret;
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c332e97..bfadcd0f4944 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
592 end = (unsigned long)__end_rodata - 1; 592 end = (unsigned long)__end_rodata - 1;
593 593
594 /* 594 /*
595 * Setup a locked IMR around the physical extent of the kernel 595 * Setup an unlocked IMR around the physical extent of the kernel
596 * from the beginning of the .text secton to the end of the 596 * from the beginning of the .text secton to the end of the
597 * .rodata section as one physically contiguous block. 597 * .rodata section as one physically contiguous block.
598 * 598 *
599 * We don't round up @size since it is already PAGE_SIZE aligned. 599 * We don't round up @size since it is already PAGE_SIZE aligned.
600 * See vmlinux.lds.S for details. 600 * See vmlinux.lds.S for details.
601 */ 601 */
602 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); 602 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
603 if (ret < 0) { 603 if (ret < 0) {
604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", 604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
605 size / 1024, start, end); 605 size / 1024, start, end);
diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c
index 8502ad30e61b..5adb6a2fd117 100644
--- a/arch/x86/um/os-Linux/task_size.c
+++ b/arch/x86/um/os-Linux/task_size.c
@@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
109 exit(1); 109 exit(1);
110 } 110 }
111 111
112 printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT); 112 printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
113 printf("Locating the top of the address space ... "); 113 printf("Locating the top of the address space ... ");
114 fflush(stdout); 114 fflush(stdout);
115 115
@@ -134,7 +134,7 @@ out:
134 exit(1); 134 exit(1);
135 } 135 }
136 top <<= UM_KERN_PAGE_SHIFT; 136 top <<= UM_KERN_PAGE_SHIFT;
137 printf("0x%x\n", top); 137 printf("0x%lx\n", top);
138 138
139 return top; 139 return top;
140} 140}
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..0363cd731320 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,6 +88,19 @@ config BLK_DEV_INTEGRITY
88 T10/SCSI Data Integrity Field or the T13/ATA External Path 88 T10/SCSI Data Integrity Field or the T13/ATA External Path
89 Protection. If in doubt, say N. 89 Protection. If in doubt, say N.
90 90
91config BLK_DEV_DAX
92 bool "Block device DAX support"
93 depends on FS_DAX
94 depends on BROKEN
95 help
96 When DAX support is available (CONFIG_FS_DAX) raw block
97 devices can also support direct userspace access to the
98 storage capacity via MMAP(2) similar to a file on a
99 DAX-enabled filesystem. However, the DAX I/O-path disables
100 some standard I/O-statistics, and the MMAP(2) path has some
101 operational differences due to bypassing the page
102 cache. If in doubt, say N.
103
91config BLK_DEV_THROTTLING 104config BLK_DEV_THROTTLING
92 bool "Block layer bio throttling support" 105 bool "Block layer bio throttling support"
93 depends on BLK_CGROUP=y 106 depends on BLK_CGROUP=y
diff --git a/block/bio.c b/block/bio.c
index dbabd48b1934..cf7591551b17 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,7 +874,7 @@ int submit_bio_wait(int rw, struct bio *bio)
874 bio->bi_private = &ret; 874 bio->bi_private = &ret;
875 bio->bi_end_io = submit_bio_wait_endio; 875 bio->bi_end_io = submit_bio_wait_endio;
876 submit_bio(rw, bio); 876 submit_bio(rw, bio);
877 wait_for_completion(&ret.event); 877 wait_for_completion_io(&ret.event);
878 878
879 return ret.error; 879 return ret.error;
880} 880}
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { 1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091 /* 1091 /*
1092 * if we're in a workqueue, the request is orphaned, so 1092 * if we're in a workqueue, the request is orphaned, so
1093 * don't copy into a random user address space, just free. 1093 * don't copy into a random user address space, just free
1094 * and return -EINTR so user space doesn't expect any data.
1094 */ 1095 */
1095 if (current->mm && bio_data_dir(bio) == READ) 1096 if (!current->mm)
1097 ret = -EINTR;
1098 else if (bio_data_dir(bio) == READ)
1096 ret = bio_copy_to_iter(bio, bmd->iter); 1099 ret = bio_copy_to_iter(bio, bmd->iter);
1097 if (bmd->is_our_pages) 1100 if (bmd->is_our_pages)
1098 bio_free_pages(bio); 1101 bio_free_pages(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a37188b559f..66e6f1aae02e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
788{ 788{
789 struct gendisk *disk; 789 struct gendisk *disk;
790 struct blkcg_gq *blkg; 790 struct blkcg_gq *blkg;
791 struct module *owner;
791 unsigned int major, minor; 792 unsigned int major, minor;
792 int key_len, part, ret; 793 int key_len, part, ret;
793 char *body; 794 char *body;
@@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
804 if (!disk) 805 if (!disk)
805 return -ENODEV; 806 return -ENODEV;
806 if (part) { 807 if (part) {
808 owner = disk->fops->owner;
807 put_disk(disk); 809 put_disk(disk);
810 module_put(owner);
808 return -ENODEV; 811 return -ENODEV;
809 } 812 }
810 813
@@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
820 ret = PTR_ERR(blkg); 823 ret = PTR_ERR(blkg);
821 rcu_read_unlock(); 824 rcu_read_unlock();
822 spin_unlock_irq(disk->queue->queue_lock); 825 spin_unlock_irq(disk->queue->queue_lock);
826 owner = disk->fops->owner;
823 put_disk(disk); 827 put_disk(disk);
828 module_put(owner);
824 /* 829 /*
825 * If queue was bypassing, we should retry. Do so after a 830 * If queue was bypassing, we should retry. Do so after a
826 * short msleep(). It isn't strictly necessary but queue 831 * short msleep(). It isn't strictly necessary but queue
@@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
851void blkg_conf_finish(struct blkg_conf_ctx *ctx) 856void blkg_conf_finish(struct blkg_conf_ctx *ctx)
852 __releases(ctx->disk->queue->queue_lock) __releases(rcu) 857 __releases(ctx->disk->queue->queue_lock) __releases(rcu)
853{ 858{
859 struct module *owner;
860
854 spin_unlock_irq(ctx->disk->queue->queue_lock); 861 spin_unlock_irq(ctx->disk->queue->queue_lock);
855 rcu_read_unlock(); 862 rcu_read_unlock();
863 owner = ctx->disk->fops->owner;
856 put_disk(ctx->disk); 864 put_disk(ctx->disk);
865 module_put(owner);
857} 866}
858EXPORT_SYMBOL_GPL(blkg_conf_finish); 867EXPORT_SYMBOL_GPL(blkg_conf_finish);
859 868
diff --git a/block/blk-map.c b/block/blk-map.c
index f565e11f465a..a54f0543b956 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
57 return ret; 57 return ret;
58} 58}
59 59
60static int __blk_rq_map_user_iov(struct request *rq,
61 struct rq_map_data *map_data, struct iov_iter *iter,
62 gfp_t gfp_mask, bool copy)
63{
64 struct request_queue *q = rq->q;
65 struct bio *bio, *orig_bio;
66 int ret;
67
68 if (copy)
69 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
70 else
71 bio = bio_map_user_iov(q, iter, gfp_mask);
72
73 if (IS_ERR(bio))
74 return PTR_ERR(bio);
75
76 if (map_data && map_data->null_mapped)
77 bio_set_flag(bio, BIO_NULL_MAPPED);
78
79 iov_iter_advance(iter, bio->bi_iter.bi_size);
80 if (map_data)
81 map_data->offset += bio->bi_iter.bi_size;
82
83 orig_bio = bio;
84 blk_queue_bounce(q, &bio);
85
86 /*
87 * We link the bounce buffer in and could have to traverse it
88 * later so we have to get a ref to prevent it from being freed
89 */
90 bio_get(bio);
91
92 ret = blk_rq_append_bio(q, rq, bio);
93 if (ret) {
94 bio_endio(bio);
95 __blk_rq_unmap_user(orig_bio);
96 bio_put(bio);
97 return ret;
98 }
99
100 return 0;
101}
102
60/** 103/**
61 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage 104 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
62 * @q: request queue where request should be inserted 105 * @q: request queue where request should be inserted
@@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
82 struct rq_map_data *map_data, 125 struct rq_map_data *map_data,
83 const struct iov_iter *iter, gfp_t gfp_mask) 126 const struct iov_iter *iter, gfp_t gfp_mask)
84{ 127{
85 struct bio *bio;
86 int unaligned = 0;
87 struct iov_iter i;
88 struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; 128 struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
129 bool copy = (q->dma_pad_mask & iter->count) || map_data;
130 struct bio *bio = NULL;
131 struct iov_iter i;
132 int ret;
89 133
90 if (!iter || !iter->count) 134 if (!iter || !iter->count)
91 return -EINVAL; 135 return -EINVAL;
@@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
101 */ 145 */
102 if ((uaddr & queue_dma_alignment(q)) || 146 if ((uaddr & queue_dma_alignment(q)) ||
103 iovec_gap_to_prv(q, &prv, &iov)) 147 iovec_gap_to_prv(q, &prv, &iov))
104 unaligned = 1; 148 copy = true;
105 149
106 prv.iov_base = iov.iov_base; 150 prv.iov_base = iov.iov_base;
107 prv.iov_len = iov.iov_len; 151 prv.iov_len = iov.iov_len;
108 } 152 }
109 153
110 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) 154 i = *iter;
111 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); 155 do {
112 else 156 ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
113 bio = bio_map_user_iov(q, iter, gfp_mask); 157 if (ret)
114 158 goto unmap_rq;
115 if (IS_ERR(bio)) 159 if (!bio)
116 return PTR_ERR(bio); 160 bio = rq->bio;
117 161 } while (iov_iter_count(&i));
118 if (map_data && map_data->null_mapped)
119 bio_set_flag(bio, BIO_NULL_MAPPED);
120
121 if (bio->bi_iter.bi_size != iter->count) {
122 /*
123 * Grab an extra reference to this bio, as bio_unmap_user()
124 * expects to be able to drop it twice as it happens on the
125 * normal IO completion path
126 */
127 bio_get(bio);
128 bio_endio(bio);
129 __blk_rq_unmap_user(bio);
130 return -EINVAL;
131 }
132 162
133 if (!bio_flagged(bio, BIO_USER_MAPPED)) 163 if (!bio_flagged(bio, BIO_USER_MAPPED))
134 rq->cmd_flags |= REQ_COPY_USER; 164 rq->cmd_flags |= REQ_COPY_USER;
135
136 blk_queue_bounce(q, &bio);
137 bio_get(bio);
138 blk_rq_bio_prep(q, rq, bio);
139 return 0; 165 return 0;
166
167unmap_rq:
168 __blk_rq_unmap_user(bio);
169 rq->bio = NULL;
170 return -EINVAL;
140} 171}
141EXPORT_SYMBOL(blk_rq_map_user_iov); 172EXPORT_SYMBOL(blk_rq_map_user_iov);
142 173
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 888a7fec81f7..261353166dcf 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
304 struct bio *nxt) 304 struct bio *nxt)
305{ 305{
306 struct bio_vec end_bv = { NULL }, nxt_bv; 306 struct bio_vec end_bv = { NULL }, nxt_bv;
307 struct bvec_iter iter;
308 307
309 if (!blk_queue_cluster(q)) 308 if (!blk_queue_cluster(q))
310 return 0; 309 return 0;
@@ -316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
316 if (!bio_has_data(bio)) 315 if (!bio_has_data(bio))
317 return 1; 316 return 1;
318 317
319 bio_for_each_segment(end_bv, bio, iter) 318 bio_get_last_bvec(bio, &end_bv);
320 if (end_bv.bv_len == iter.bi_size) 319 bio_get_first_bvec(nxt, &nxt_bv);
321 break;
322
323 nxt_bv = bio_iovec(nxt);
324 320
325 if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv)) 321 if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
326 return 0; 322 return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c0622fae413..56c0a726b619 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -599,8 +599,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
599 * If a request wasn't started before the queue was 599 * If a request wasn't started before the queue was
600 * marked dying, kill it here or it'll go unnoticed. 600 * marked dying, kill it here or it'll go unnoticed.
601 */ 601 */
602 if (unlikely(blk_queue_dying(rq->q))) 602 if (unlikely(blk_queue_dying(rq->q))) {
603 blk_mq_complete_request(rq, -EIO); 603 rq->errors = -EIO;
604 blk_mq_end_request(rq, rq->errors);
605 }
604 return; 606 return;
605 } 607 }
606 608
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd4973583978..c7bb666aafd1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
92 lim->virt_boundary_mask = 0; 92 lim->virt_boundary_mask = 0;
93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; 93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
94 lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = 94 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
95 BLK_SAFE_MAX_SECTORS; 95 lim->max_dev_sectors = 0;
96 lim->chunk_sectors = 0; 96 lim->chunk_sectors = 0;
97 lim->max_write_same_sectors = 0; 97 lim->max_write_same_sectors = 0;
98 lim->max_discard_sectors = 0; 98 lim->max_discard_sectors = 0;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc487ce1..dd93763057ce 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -147,10 +147,9 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
147 147
148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) 148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
149{ 149{
150 unsigned long long val;
151 150
152 val = q->limits.max_hw_discard_sectors << 9; 151 return sprintf(page, "%llu\n",
153 return sprintf(page, "%llu\n", val); 152 (unsigned long long)q->limits.max_hw_discard_sectors << 9);
154} 153}
155 154
156static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 155static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index a753df2b3fc2..d0dd7882d8c7 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -39,7 +39,6 @@ struct deadline_data {
39 */ 39 */
40 struct request *next_rq[2]; 40 struct request *next_rq[2];
41 unsigned int batching; /* number of sequential requests made */ 41 unsigned int batching; /* number of sequential requests made */
42 sector_t last_sector; /* head position */
43 unsigned int starved; /* times reads have starved writes */ 42 unsigned int starved; /* times reads have starved writes */
44 43
45 /* 44 /*
@@ -210,8 +209,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
210 dd->next_rq[WRITE] = NULL; 209 dd->next_rq[WRITE] = NULL;
211 dd->next_rq[data_dir] = deadline_latter_request(rq); 210 dd->next_rq[data_dir] = deadline_latter_request(rq);
212 211
213 dd->last_sector = rq_end_sector(rq);
214
215 /* 212 /*
216 * take it off the sort and fifo list, move 213 * take it off the sort and fifo list, move
217 * to dispatch queue 214 * to dispatch queue
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..35947ac87644 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
469 nfit_mem->bdw = NULL; 469 nfit_mem->bdw = NULL;
470} 470}
471 471
472static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, 472static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
474{ 474{
475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
476 struct nfit_memdev *nfit_memdev; 476 struct nfit_memdev *nfit_memdev;
477 struct nfit_flush *nfit_flush; 477 struct nfit_flush *nfit_flush;
478 struct nfit_dcr *nfit_dcr;
479 struct nfit_bdw *nfit_bdw; 478 struct nfit_bdw *nfit_bdw;
480 struct nfit_idt *nfit_idt; 479 struct nfit_idt *nfit_idt;
481 u16 idt_idx, range_index; 480 u16 idt_idx, range_index;
482 481
483 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
484 if (nfit_dcr->dcr->region_index != dcr)
485 continue;
486 nfit_mem->dcr = nfit_dcr->dcr;
487 break;
488 }
489
490 if (!nfit_mem->dcr) {
491 dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
492 spa->range_index, __to_nfit_memdev(nfit_mem)
493 ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
494 return -ENODEV;
495 }
496
497 /*
498 * We've found enough to create an nvdimm, optionally
499 * find an associated BDW
500 */
501 list_add(&nfit_mem->list, &acpi_desc->dimms);
502
503 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 482 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
504 if (nfit_bdw->bdw->region_index != dcr) 483 if (nfit_bdw->bdw->region_index != dcr)
505 continue; 484 continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
508 } 487 }
509 488
510 if (!nfit_mem->bdw) 489 if (!nfit_mem->bdw)
511 return 0; 490 return;
512 491
513 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 492 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
514 493
515 if (!nfit_mem->spa_bdw) 494 if (!nfit_mem->spa_bdw)
516 return 0; 495 return;
517 496
518 range_index = nfit_mem->spa_bdw->range_index; 497 range_index = nfit_mem->spa_bdw->range_index;
519 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 498 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
538 } 517 }
539 break; 518 break;
540 } 519 }
541
542 return 0;
543} 520}
544 521
545static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 522static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
548 struct nfit_mem *nfit_mem, *found; 525 struct nfit_mem *nfit_mem, *found;
549 struct nfit_memdev *nfit_memdev; 526 struct nfit_memdev *nfit_memdev;
550 int type = nfit_spa_type(spa); 527 int type = nfit_spa_type(spa);
551 u16 dcr;
552 528
553 switch (type) { 529 switch (type) {
554 case NFIT_SPA_DCR: 530 case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
559 } 535 }
560 536
561 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 537 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
562 int rc; 538 struct nfit_dcr *nfit_dcr;
539 u32 device_handle;
540 u16 dcr;
563 541
564 if (nfit_memdev->memdev->range_index != spa->range_index) 542 if (nfit_memdev->memdev->range_index != spa->range_index)
565 continue; 543 continue;
566 found = NULL; 544 found = NULL;
567 dcr = nfit_memdev->memdev->region_index; 545 dcr = nfit_memdev->memdev->region_index;
546 device_handle = nfit_memdev->memdev->device_handle;
568 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 547 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
569 if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { 548 if (__to_nfit_memdev(nfit_mem)->device_handle
549 == device_handle) {
570 found = nfit_mem; 550 found = nfit_mem;
571 break; 551 break;
572 } 552 }
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
579 if (!nfit_mem) 559 if (!nfit_mem)
580 return -ENOMEM; 560 return -ENOMEM;
581 INIT_LIST_HEAD(&nfit_mem->list); 561 INIT_LIST_HEAD(&nfit_mem->list);
562 list_add(&nfit_mem->list, &acpi_desc->dimms);
563 }
564
565 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
566 if (nfit_dcr->dcr->region_index != dcr)
567 continue;
568 /*
569 * Record the control region for the dimm. For
570 * the ACPI 6.1 case, where there are separate
571 * control regions for the pmem vs blk
572 * interfaces, be sure to record the extended
573 * blk details.
574 */
575 if (!nfit_mem->dcr)
576 nfit_mem->dcr = nfit_dcr->dcr;
577 else if (nfit_mem->dcr->windows == 0
578 && nfit_dcr->dcr->windows)
579 nfit_mem->dcr = nfit_dcr->dcr;
580 break;
581 }
582
583 if (dcr && !nfit_mem->dcr) {
584 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
585 spa->range_index, dcr);
586 return -ENODEV;
582 } 587 }
583 588
584 if (type == NFIT_SPA_DCR) { 589 if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
595 nfit_mem->idt_dcr = nfit_idt->idt; 600 nfit_mem->idt_dcr = nfit_idt->idt;
596 break; 601 break;
597 } 602 }
603 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
598 } else { 604 } else {
599 /* 605 /*
600 * A single dimm may belong to multiple SPA-PM 606 * A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
603 */ 609 */
604 nfit_mem->memdev_pmem = nfit_memdev->memdev; 610 nfit_mem->memdev_pmem = nfit_memdev->memdev;
605 } 611 }
606
607 if (found)
608 continue;
609
610 rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
611 if (rc)
612 return rc;
613 } 612 }
614 613
615 return 0; 614 return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1504 case 1: 1503 case 1:
1505 /* ARS unsupported, but we should never get here */ 1504 /* ARS unsupported, but we should never get here */
1506 return 0; 1505 return 0;
1507 case 2: 1506 case 6:
1508 return -EINVAL;
1509 case 3:
1510 /* ARS is in progress */ 1507 /* ARS is in progress */
1511 msleep(1000); 1508 msleep(1000);
1512 break; 1509 break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1517} 1514}
1518 1515
1519static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, 1516static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1520 struct nd_cmd_ars_status *cmd) 1517 struct nd_cmd_ars_status *cmd, u32 size)
1521{ 1518{
1522 int rc; 1519 int rc;
1523 1520
1524 while (1) { 1521 while (1) {
1525 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd, 1522 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
1526 sizeof(*cmd)); 1523 size);
1527 if (rc || cmd->status & 0xffff) 1524 if (rc || cmd->status & 0xffff)
1528 return -ENXIO; 1525 return -ENXIO;
1529 1526
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1538 case 2: 1535 case 2:
1539 /* No ARS performed for the current boot */ 1536 /* No ARS performed for the current boot */
1540 return 0; 1537 return 0;
1538 case 3:
1539 /* TODO: error list overflow support */
1541 default: 1540 default:
1542 return -ENXIO; 1541 return -ENXIO;
1543 } 1542 }
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1581 struct nd_cmd_ars_start *ars_start = NULL; 1580 struct nd_cmd_ars_start *ars_start = NULL;
1582 struct nd_cmd_ars_cap *ars_cap = NULL; 1581 struct nd_cmd_ars_cap *ars_cap = NULL;
1583 u64 start, len, cur, remaining; 1582 u64 start, len, cur, remaining;
1583 u32 ars_status_size;
1584 int rc; 1584 int rc;
1585 1585
1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL); 1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1590 start = ndr_desc->res->start; 1590 start = ndr_desc->res->start;
1591 len = ndr_desc->res->end - ndr_desc->res->start + 1; 1591 len = ndr_desc->res->end - ndr_desc->res->start + 1;
1592 1592
1593 /*
1594 * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
1595 * Scrub' flag in extended status is not set, skip this but continue
1596 * initialization
1597 */
1593 rc = ars_get_cap(nd_desc, ars_cap, start, len); 1598 rc = ars_get_cap(nd_desc, ars_cap, start, len);
1599 if (rc == -ENOTTY) {
1600 dev_dbg(acpi_desc->dev,
1601 "Address Range Scrub is not implemented, won't create an error list\n");
1602 rc = 0;
1603 goto out;
1604 }
1594 if (rc) 1605 if (rc)
1595 goto out; 1606 goto out;
1596 1607
1597 /*
1598 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
1599 * extended status is not set, skip this but continue initialization
1600 */
1601 if ((ars_cap->status & 0xffff) || 1608 if ((ars_cap->status & 0xffff) ||
1602 !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) { 1609 !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
1603 dev_warn(acpi_desc->dev, 1610 dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1610 * Check if a full-range ARS has been run. If so, use those results 1617 * Check if a full-range ARS has been run. If so, use those results
1611 * without having to start a new ARS. 1618 * without having to start a new ARS.
1612 */ 1619 */
1613 ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status), 1620 ars_status_size = ars_cap->max_ars_out;
1614 GFP_KERNEL); 1621 ars_status = kzalloc(ars_status_size, GFP_KERNEL);
1615 if (!ars_status) { 1622 if (!ars_status) {
1616 rc = -ENOMEM; 1623 rc = -ENOMEM;
1617 goto out; 1624 goto out;
1618 } 1625 }
1619 1626
1620 rc = ars_get_status(nd_desc, ars_status); 1627 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1621 if (rc) 1628 if (rc)
1622 goto out; 1629 goto out;
1623 1630
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1647 if (rc) 1654 if (rc)
1648 goto out; 1655 goto out;
1649 1656
1650 rc = ars_get_status(nd_desc, ars_status); 1657 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1651 if (rc) 1658 if (rc)
1652 goto out; 1659 goto out;
1653 1660
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
406 return 0; 406 return 0;
407 } 407 }
408 408
409 if (pci_has_managed_irq(dev)) 409 if (dev->irq_managed && dev->irq > 0)
410 return 0; 410 return 0;
411 411
412 entry = acpi_pci_irq_lookup(dev, pin); 412 entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
451 kfree(entry); 451 kfree(entry);
452 return rc; 452 return rc;
453 } 453 }
454 pci_set_managed_irq(dev, rc); 454 dev->irq = rc;
455 dev->irq_managed = 1;
455 456
456 if (link) 457 if (link)
457 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); 458 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
474 u8 pin; 475 u8 pin;
475 476
476 pin = dev->pin; 477 pin = dev->pin;
477 if (!pin || !pci_has_managed_irq(dev)) 478 if (!pin || !dev->irq_managed || dev->irq <= 0)
478 return; 479 return;
479 480
481 /* Keep IOAPIC pin configuration when suspending */
482 if (dev->dev.power.is_prepared)
483 return;
484#ifdef CONFIG_PM
485 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
486 return;
487#endif
488
480 entry = acpi_pci_irq_lookup(dev, pin); 489 entry = acpi_pci_irq_lookup(dev, pin);
481 if (!entry) 490 if (!entry)
482 return; 491 return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
496 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); 505 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
497 if (gsi >= 0) { 506 if (gsi >= 0) {
498 acpi_unregister_gsi(gsi); 507 acpi_unregister_gsi(gsi);
499 pci_reset_managed_irq(dev); 508 dev->irq_managed = 0;
500 } 509 }
501} 510}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> 6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
7 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
8 * 7 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 * 9 *
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
438 * enabled system. 437 * enabled system.
439 */ 438 */
440 439
440#define ACPI_MAX_IRQS 256
441#define ACPI_MAX_ISA_IRQ 16 441#define ACPI_MAX_ISA_IRQ 16
442 442
443#define PIRQ_PENALTY_PCI_AVAILABLE (0) 443#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) 447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) 448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
449 449
450static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { 450static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ 451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ 452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ 453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ 464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ 465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ 466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
467 /* >IRQ15 */
467}; 468};
468 469
469struct irq_penalty_info {
470 int irq;
471 int penalty;
472 struct list_head node;
473};
474
475static LIST_HEAD(acpi_irq_penalty_list);
476
477static int acpi_irq_get_penalty(int irq)
478{
479 struct irq_penalty_info *irq_info;
480
481 if (irq < ACPI_MAX_ISA_IRQ)
482 return acpi_irq_isa_penalty[irq];
483
484 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
485 if (irq_info->irq == irq)
486 return irq_info->penalty;
487 }
488
489 return 0;
490}
491
492static int acpi_irq_set_penalty(int irq, int new_penalty)
493{
494 struct irq_penalty_info *irq_info;
495
496 /* see if this is a ISA IRQ */
497 if (irq < ACPI_MAX_ISA_IRQ) {
498 acpi_irq_isa_penalty[irq] = new_penalty;
499 return 0;
500 }
501
502 /* next, try to locate from the dynamic list */
503 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
504 if (irq_info->irq == irq) {
505 irq_info->penalty = new_penalty;
506 return 0;
507 }
508 }
509
510 /* nope, let's allocate a slot for this IRQ */
511 irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
512 if (!irq_info)
513 return -ENOMEM;
514
515 irq_info->irq = irq;
516 irq_info->penalty = new_penalty;
517 list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
518
519 return 0;
520}
521
522static void acpi_irq_add_penalty(int irq, int penalty)
523{
524 int curpen = acpi_irq_get_penalty(irq);
525
526 acpi_irq_set_penalty(irq, curpen + penalty);
527}
528
529int __init acpi_irq_penalty_init(void) 470int __init acpi_irq_penalty_init(void)
530{ 471{
531 struct acpi_pci_link *link; 472 struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
546 link->irq.possible_count; 487 link->irq.possible_count;
547 488
548 for (i = 0; i < link->irq.possible_count; i++) { 489 for (i = 0; i < link->irq.possible_count; i++) {
549 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) { 490 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
550 int irqpos = link->irq.possible[i]; 491 acpi_irq_penalty[link->irq.
551 492 possible[i]] +=
552 acpi_irq_add_penalty(irqpos, penalty); 493 penalty;
553 }
554 } 494 }
555 495
556 } else if (link->irq.active) { 496 } else if (link->irq.active) {
557 acpi_irq_add_penalty(link->irq.active, 497 acpi_irq_penalty[link->irq.active] +=
558 PIRQ_PENALTY_PCI_POSSIBLE); 498 PIRQ_PENALTY_PCI_POSSIBLE;
559 } 499 }
560 } 500 }
561 501
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
607 * the use of IRQs 9, 10, 11, and >15. 547 * the use of IRQs 9, 10, 11, and >15.
608 */ 548 */
609 for (i = (link->irq.possible_count - 1); i >= 0; i--) { 549 for (i = (link->irq.possible_count - 1); i >= 0; i--) {
610 if (acpi_irq_get_penalty(irq) > 550 if (acpi_irq_penalty[irq] >
611 acpi_irq_get_penalty(link->irq.possible[i])) 551 acpi_irq_penalty[link->irq.possible[i]])
612 irq = link->irq.possible[i]; 552 irq = link->irq.possible[i];
613 } 553 }
614 } 554 }
615 if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { 555 if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
616 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " 556 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
617 "Try pci=noacpi or acpi=off\n", 557 "Try pci=noacpi or acpi=off\n",
618 acpi_device_name(link->device), 558 acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
628 acpi_device_bid(link->device)); 568 acpi_device_bid(link->device));
629 return -ENODEV; 569 return -ENODEV;
630 } else { 570 } else {
631 acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING); 571 acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
632
633 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", 572 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
634 acpi_device_name(link->device), 573 acpi_device_name(link->device),
635 acpi_device_bid(link->device), link->irq.active); 574 acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
839} 778}
840 779
841/* 780/*
842 * modify penalty from cmdline 781 * modify acpi_irq_penalty[] from cmdline
843 */ 782 */
844static int __init acpi_irq_penalty_update(char *str, int used) 783static int __init acpi_irq_penalty_update(char *str, int used)
845{ 784{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
857 if (irq < 0) 796 if (irq < 0)
858 continue; 797 continue;
859 798
799 if (irq >= ARRAY_SIZE(acpi_irq_penalty))
800 continue;
801
860 if (used) 802 if (used)
861 acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); 803 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
862 else 804 else
863 acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); 805 acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
864 806
865 if (retval != 2) /* no next number */ 807 if (retval != 2) /* no next number */
866 break; 808 break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
877 */ 819 */
878void acpi_penalize_isa_irq(int irq, int active) 820void acpi_penalize_isa_irq(int irq, int active)
879{ 821{
880 if (irq >= 0) 822 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
881 acpi_irq_add_penalty(irq, active ? 823 if (active)
882 PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 824 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
825 else
826 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
827 }
883} 828}
884 829
885bool acpi_isa_irq_available(int irq) 830bool acpi_isa_irq_available(int irq)
886{ 831{
887 return irq >= 0 && 832 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
888 (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 833 acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
889} 834}
890 835
891/* 836/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
895 */ 840 */
896void acpi_penalize_sci_irq(int irq, int trigger, int polarity) 841void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
897{ 842{
898 int penalty; 843 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
899 844 if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
900 if (irq < 0) 845 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
901 return; 846 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
902 847 else
903 if (trigger != ACPI_MADT_TRIGGER_LEVEL || 848 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
904 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) 849 }
905 penalty = PIRQ_PENALTY_ISA_ALWAYS;
906 else
907 penalty = PIRQ_PENALTY_PCI_USING;
908
909 acpi_irq_add_penalty(irq, penalty);
910} 850}
911 851
912/* 852/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2075 return -EFAULT; 2075 return -EFAULT;
2076 2076
2077 ptr += sizeof(void *); 2077 ptr += sizeof(cookie);
2078 list_for_each_entry(w, &proc->delivered_death, entry) { 2078 list_for_each_entry(w, &proc->delivered_death, entry) {
2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2080 2080
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 546a3692774f..146dc0b8ec61 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ 369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
370 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
370 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ 371 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
372 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
371 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ 373 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
372 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ 374 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
373 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ 375 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
374 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ 376 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
377 { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
378 { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
375 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ 379 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
376 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ 380 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
377 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ 381 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
378 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ 382 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
383 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
384 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
379 385
380 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 386 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
381 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 387 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1325{} 1331{}
1326#endif 1332#endif
1327 1333
1334#ifdef CONFIG_ARM64
1335/*
1336 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
1337 * Workaround is to make sure all pending IRQs are served before leaving
1338 * handler.
1339 */
1340static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
1341{
1342 struct ata_host *host = dev_instance;
1343 struct ahci_host_priv *hpriv;
1344 unsigned int rc = 0;
1345 void __iomem *mmio;
1346 u32 irq_stat, irq_masked;
1347 unsigned int handled = 1;
1348
1349 VPRINTK("ENTER\n");
1350 hpriv = host->private_data;
1351 mmio = hpriv->mmio;
1352 irq_stat = readl(mmio + HOST_IRQ_STAT);
1353 if (!irq_stat)
1354 return IRQ_NONE;
1355
1356 do {
1357 irq_masked = irq_stat & hpriv->port_map;
1358 spin_lock(&host->lock);
1359 rc = ahci_handle_port_intr(host, irq_masked);
1360 if (!rc)
1361 handled = 0;
1362 writel(irq_stat, mmio + HOST_IRQ_STAT);
1363 irq_stat = readl(mmio + HOST_IRQ_STAT);
1364 spin_unlock(&host->lock);
1365 } while (irq_stat);
1366 VPRINTK("EXIT\n");
1367
1368 return IRQ_RETVAL(handled);
1369}
1370#endif
1371
1328/* 1372/*
1329 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer 1373 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
1330 * to single msi. 1374 * to single msi.
@@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1560 if (ahci_broken_devslp(pdev)) 1604 if (ahci_broken_devslp(pdev))
1561 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; 1605 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
1562 1606
1607#ifdef CONFIG_ARM64
1608 if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
1609 hpriv->irq_handler = ahci_thunderx_irq_handler;
1610#endif
1611
1563 /* save initial config */ 1612 /* save initial config */
1564 ahci_pci_save_initial_config(pdev, hpriv); 1613 ahci_pci_save_initial_config(pdev, hpriv);
1565 1614
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a44c75d4c284..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
240 error-handling stage) */ 240 error-handling stage) */
241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ 241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ 242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
243 AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as 243
244 Edge Triggered */
245#ifdef CONFIG_PCI_MSI 244#ifdef CONFIG_PCI_MSI
246 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ 245 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
247 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ 246 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@@ -361,6 +360,7 @@ struct ahci_host_priv {
361 * be overridden anytime before the host is activated. 360 * be overridden anytime before the host is activated.
362 */ 361 */
363 void (*start_engine)(struct ata_port *ap); 362 void (*start_engine)(struct ata_port *ap);
363 irqreturn_t (*irq_handler)(int irq, void *dev_instance);
364}; 364};
365 365
366#ifdef CONFIG_PCI_MSI 366#ifdef CONFIG_PCI_MSI
@@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
424void ahci_print_info(struct ata_host *host, const char *scc_s); 424void ahci_print_info(struct ata_host *host, const char *scc_s);
425int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht); 425int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
426void ahci_error_handler(struct ata_port *ap); 426void ahci_error_handler(struct ata_port *ap);
427u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
427 428
428static inline void __iomem *__ahci_port_base(struct ata_host *host, 429static inline void __iomem *__ahci_port_base(struct ata_host *host,
429 unsigned int port_no) 430 unsigned int port_no)
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
548 return rc; 548 return rc;
549} 549}
550 550
551/**
552 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
553 * @ata_host: Host that recieved the irq
554 * @irq_masked: HOST_IRQ_STAT value
555 *
556 * For hardware with broken edge trigger latch
557 * the HOST_IRQ_STAT register misses the edge interrupt
558 * when clearing of HOST_IRQ_STAT register and hardware
559 * reporting the PORT_IRQ_STAT register at the
560 * same clock cycle.
561 * As such, the algorithm below outlines the workaround.
562 *
563 * 1. Read HOST_IRQ_STAT register and save the state.
564 * 2. Clear the HOST_IRQ_STAT register.
565 * 3. Read back the HOST_IRQ_STAT register.
566 * 4. If HOST_IRQ_STAT register equals to zero, then
567 * traverse the rest of port's PORT_IRQ_STAT register
568 * to check if an interrupt is triggered at that point else
569 * go to step 6.
570 * 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
571 * then update the state of HOST_IRQ_STAT saved in step 1.
572 * 6. Handle port interrupts.
573 * 7. Exit
574 */
575static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
576 u32 irq_masked)
577{
578 struct ahci_host_priv *hpriv = host->private_data;
579 void __iomem *port_mmio;
580 int i;
581
582 if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
583 for (i = 0; i < host->n_ports; i++) {
584 if (irq_masked & (1 << i))
585 continue;
586
587 port_mmio = ahci_port_base(host->ports[i]);
588 if (readl(port_mmio + PORT_IRQ_STAT))
589 irq_masked |= (1 << i);
590 }
591 }
592
593 return ahci_handle_port_intr(host, irq_masked);
594}
595
596static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
597{
598 struct ata_host *host = dev_instance;
599 struct ahci_host_priv *hpriv;
600 unsigned int rc = 0;
601 void __iomem *mmio;
602 u32 irq_stat, irq_masked;
603
604 VPRINTK("ENTER\n");
605
606 hpriv = host->private_data;
607 mmio = hpriv->mmio;
608
609 /* sigh. 0xffffffff is a valid return from h/w */
610 irq_stat = readl(mmio + HOST_IRQ_STAT);
611 if (!irq_stat)
612 return IRQ_NONE;
613
614 irq_masked = irq_stat & hpriv->port_map;
615
616 spin_lock(&host->lock);
617
618 /*
619 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
620 * it should be cleared before all the port events are cleared.
621 */
622 writel(irq_stat, mmio + HOST_IRQ_STAT);
623
624 rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
625
626 spin_unlock(&host->lock);
627
628 VPRINTK("EXIT\n");
629
630 return IRQ_RETVAL(rc);
631}
632
551static struct ata_port_operations xgene_ahci_v1_ops = { 633static struct ata_port_operations xgene_ahci_v1_ops = {
552 .inherits = &ahci_ops, 634 .inherits = &ahci_ops,
553 .host_stop = xgene_ahci_host_stop, 635 .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
779 hpriv->flags = AHCI_HFLAG_NO_NCQ; 861 hpriv->flags = AHCI_HFLAG_NO_NCQ;
780 break; 862 break;
781 case XGENE_AHCI_V2: 863 case XGENE_AHCI_V2:
782 hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ; 864 hpriv->flags |= AHCI_HFLAG_YES_FBS;
865 hpriv->irq_handler = xgene_ahci_irq_intr;
783 break; 866 break;
784 default: 867 default:
785 break; 868 break;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 402967902cbe..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
113 const char *buf, size_t size); 113 const char *buf, size_t size);
114static ssize_t ahci_show_em_supported(struct device *dev, 114static ssize_t ahci_show_em_supported(struct device *dev,
115 struct device_attribute *attr, char *buf); 115 struct device_attribute *attr, char *buf);
116static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
116 117
117static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 118static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 119static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
512 513
513 if (!hpriv->start_engine) 514 if (!hpriv->start_engine)
514 hpriv->start_engine = ahci_start_engine; 515 hpriv->start_engine = ahci_start_engine;
516
517 if (!hpriv->irq_handler)
518 hpriv->irq_handler = ahci_single_level_irq_intr;
515} 519}
516EXPORT_SYMBOL_GPL(ahci_save_initial_config); 520EXPORT_SYMBOL_GPL(ahci_save_initial_config);
517 521
@@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
1164 1168
1165 /* mark esata ports */ 1169 /* mark esata ports */
1166 tmp = readl(port_mmio + PORT_CMD); 1170 tmp = readl(port_mmio + PORT_CMD);
1167 if ((tmp & PORT_CMD_HPCP) || 1171 if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
1168 ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
1169 ap->pflags |= ATA_PFLAG_EXTERNAL; 1172 ap->pflags |= ATA_PFLAG_EXTERNAL;
1170} 1173}
1171 1174
@@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
1846 return IRQ_HANDLED; 1849 return IRQ_HANDLED;
1847} 1850}
1848 1851
1849static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) 1852u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1850{ 1853{
1851 unsigned int i, handled = 0; 1854 unsigned int i, handled = 0;
1852 1855
@@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1872 1875
1873 return handled; 1876 return handled;
1874} 1877}
1875 1878EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
1876static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
1877{
1878 struct ata_host *host = dev_instance;
1879 struct ahci_host_priv *hpriv;
1880 unsigned int rc = 0;
1881 void __iomem *mmio;
1882 u32 irq_stat, irq_masked;
1883
1884 VPRINTK("ENTER\n");
1885
1886 hpriv = host->private_data;
1887 mmio = hpriv->mmio;
1888
1889 /* sigh. 0xffffffff is a valid return from h/w */
1890 irq_stat = readl(mmio + HOST_IRQ_STAT);
1891 if (!irq_stat)
1892 return IRQ_NONE;
1893
1894 irq_masked = irq_stat & hpriv->port_map;
1895
1896 spin_lock(&host->lock);
1897
1898 /*
1899 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
1900 * it should be cleared before all the port events are cleared.
1901 */
1902 writel(irq_stat, mmio + HOST_IRQ_STAT);
1903
1904 rc = ahci_handle_port_intr(host, irq_masked);
1905
1906 spin_unlock(&host->lock);
1907
1908 VPRINTK("EXIT\n");
1909
1910 return IRQ_RETVAL(rc);
1911}
1912 1879
1913static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) 1880static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
1914{ 1881{
@@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
2535 int irq = hpriv->irq; 2502 int irq = hpriv->irq;
2536 int rc; 2503 int rc;
2537 2504
2538 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) 2505 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
2506 if (hpriv->irq_handler)
2507 dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
2508 and custom irq handler implemented\n");
2509
2539 rc = ahci_host_activate_multi_irqs(host, sht); 2510 rc = ahci_host_activate_multi_irqs(host, sht);
2540 else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) 2511 } else {
2541 rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, 2512 rc = ata_host_activate(host, irq, hpriv->irq_handler,
2542 IRQF_SHARED, sht);
2543 else
2544 rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
2545 IRQF_SHARED, sht); 2513 IRQF_SHARED, sht);
2514 }
2515
2516
2546 return rc; 2517 return rc;
2547} 2518}
2548EXPORT_SYMBOL_GPL(ahci_host_activate); 2519EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, 675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
676 int cmd, void __user *arg) 676 int cmd, void __user *arg)
677{ 677{
678 int val = -EINVAL, rc = -EINVAL; 678 unsigned long val;
679 int rc = -EINVAL;
679 unsigned long flags; 680 unsigned long flags;
680 681
681 switch (cmd) { 682 switch (cmd) {
682 case ATA_IOC_GET_IO32: 683 case HDIO_GET_32BIT:
683 spin_lock_irqsave(ap->lock, flags); 684 spin_lock_irqsave(ap->lock, flags);
684 val = ata_ioc32(ap); 685 val = ata_ioc32(ap);
685 spin_unlock_irqrestore(ap->lock, flags); 686 spin_unlock_irqrestore(ap->lock, flags);
686 if (copy_to_user(arg, &val, 1)) 687 return put_user(val, (unsigned long __user *)arg);
687 return -EFAULT;
688 return 0;
689 688
690 case ATA_IOC_SET_IO32: 689 case HDIO_SET_32BIT:
691 val = (unsigned long) arg; 690 val = (unsigned long) arg;
692 rc = 0; 691 rc = 0;
693 spin_lock_irqsave(ap->lock, flags); 692 spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
32#include <linux/libata.h> 32#include <linux/libata.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35#include <asm/mach-rc32434/rb.h>
36
35#define DRV_NAME "pata-rb532-cf" 37#define DRV_NAME "pata-rb532-cf"
36#define DRV_VERSION "0.1.0" 38#define DRV_VERSION "0.1.0"
37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" 39#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
107 int gpio; 109 int gpio;
108 struct resource *res; 110 struct resource *res;
109 struct ata_host *ah; 111 struct ata_host *ah;
112 struct cf_device *pdata;
110 struct rb532_cf_info *info; 113 struct rb532_cf_info *info;
111 int ret; 114 int ret;
112 115
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
122 return -ENOENT; 125 return -ENOENT;
123 } 126 }
124 127
125 gpio = irq_to_gpio(irq); 128 pdata = dev_get_platdata(&pdev->dev);
129 if (!pdata) {
130 dev_err(&pdev->dev, "no platform data specified\n");
131 return -EINVAL;
132 }
133
134 gpio = pdata->gpio_pin;
126 if (gpio < 0) { 135 if (gpio < 0) {
127 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); 136 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
128 return -ENOENT; 137 return -ENOENT;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e251201dd48..84708a5f8c52 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@ static void set_fdc(int drive)
866} 866}
867 867
868/* locks the driver */ 868/* locks the driver */
869static int lock_fdc(int drive, bool interruptible) 869static int lock_fdc(int drive)
870{ 870{
871 if (WARN(atomic_read(&usage_count) == 0, 871 if (WARN(atomic_read(&usage_count) == 0,
872 "Trying to lock fdc while usage count=0\n")) 872 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
2173{ 2173{
2174 int ret; 2174 int ret;
2175 2175
2176 if (lock_fdc(drive, true)) 2176 if (lock_fdc(drive))
2177 return -EINTR; 2177 return -EINTR;
2178 2178
2179 set_floppy(drive); 2179 set_floppy(drive);
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
2960{ 2960{
2961 int ret; 2961 int ret;
2962 2962
2963 if (lock_fdc(drive, interruptible)) 2963 if (lock_fdc(drive))
2964 return -EINTR; 2964 return -EINTR;
2965 2965
2966 if (arg == FD_RESET_ALWAYS) 2966 if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3243 if (!capable(CAP_SYS_ADMIN)) 3243 if (!capable(CAP_SYS_ADMIN))
3244 return -EPERM; 3244 return -EPERM;
3245 mutex_lock(&open_lock); 3245 mutex_lock(&open_lock);
3246 if (lock_fdc(drive, true)) { 3246 if (lock_fdc(drive)) {
3247 mutex_unlock(&open_lock); 3247 mutex_unlock(&open_lock);
3248 return -EINTR; 3248 return -EINTR;
3249 } 3249 }
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3263 } else { 3263 } else {
3264 int oldStretch; 3264 int oldStretch;
3265 3265
3266 if (lock_fdc(drive, true)) 3266 if (lock_fdc(drive))
3267 return -EINTR; 3267 return -EINTR;
3268 if (cmd != FDDEFPRM) { 3268 if (cmd != FDDEFPRM) {
3269 /* notice a disk change immediately, else 3269 /* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3349 if (type) 3349 if (type)
3350 *g = &floppy_type[type]; 3350 *g = &floppy_type[type];
3351 else { 3351 else {
3352 if (lock_fdc(drive, false)) 3352 if (lock_fdc(drive))
3353 return -EINTR; 3353 return -EINTR;
3354 if (poll_drive(false, 0) == -EINTR) 3354 if (poll_drive(false, 0) == -EINTR)
3355 return -EINTR; 3355 return -EINTR;
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3433 if (UDRS->fd_ref != 1) 3433 if (UDRS->fd_ref != 1)
3434 /* somebody else has this drive open */ 3434 /* somebody else has this drive open */
3435 return -EBUSY; 3435 return -EBUSY;
3436 if (lock_fdc(drive, true)) 3436 if (lock_fdc(drive))
3437 return -EINTR; 3437 return -EINTR;
3438 3438
3439 /* do the actual eject. Fails on 3439 /* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3445 process_fd_request(); 3445 process_fd_request();
3446 return ret; 3446 return ret;
3447 case FDCLRPRM: 3447 case FDCLRPRM:
3448 if (lock_fdc(drive, true)) 3448 if (lock_fdc(drive))
3449 return -EINTR; 3449 return -EINTR;
3450 current_type[drive] = NULL; 3450 current_type[drive] = NULL;
3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 UDP->flags &= ~FTD_MSG; 3467 UDP->flags &= ~FTD_MSG;
3468 return 0; 3468 return 0;
3469 case FDFMTBEG: 3469 case FDFMTBEG:
3470 if (lock_fdc(drive, true)) 3470 if (lock_fdc(drive))
3471 return -EINTR; 3471 return -EINTR;
3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3473 return -EINTR; 3473 return -EINTR;
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3484 return do_format(drive, &inparam.f); 3484 return do_format(drive, &inparam.f);
3485 case FDFMTEND: 3485 case FDFMTEND:
3486 case FDFLUSH: 3486 case FDFLUSH:
3487 if (lock_fdc(drive, true)) 3487 if (lock_fdc(drive))
3488 return -EINTR; 3488 return -EINTR;
3489 return invalidate_drive(bdev); 3489 return invalidate_drive(bdev);
3490 case FDSETEMSGTRESH: 3490 case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3507 outparam = UDP; 3507 outparam = UDP;
3508 break; 3508 break;
3509 case FDPOLLDRVSTAT: 3509 case FDPOLLDRVSTAT:
3510 if (lock_fdc(drive, true)) 3510 if (lock_fdc(drive))
3511 return -EINTR; 3511 return -EINTR;
3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3513 return -EINTR; 3513 return -EINTR;
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3530 case FDRAWCMD: 3530 case FDRAWCMD:
3531 if (type) 3531 if (type)
3532 return -EINVAL; 3532 return -EINVAL;
3533 if (lock_fdc(drive, true)) 3533 if (lock_fdc(drive))
3534 return -EINTR; 3534 return -EINTR;
3535 set_floppy(drive); 3535 set_floppy(drive);
3536 i = raw_cmd_ioctl(cmd, (void __user *)param); 3536 i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3539 process_fd_request(); 3539 process_fd_request();
3540 return i; 3540 return i;
3541 case FDTWADDLE: 3541 case FDTWADDLE:
3542 if (lock_fdc(drive, true)) 3542 if (lock_fdc(drive))
3543 return -EINTR; 3543 return -EINTR;
3544 twaddle(); 3544 twaddle();
3545 process_fd_request(); 3545 process_fd_request();
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3663 3663
3664 opened_bdev[drive] = bdev; 3664 opened_bdev[drive] = bdev;
3665 3665
3666 if (!(mode & (FMODE_READ|FMODE_WRITE))) {
3667 res = -EINVAL;
3668 goto out;
3669 }
3670
3666 res = -ENXIO; 3671 res = -ENXIO;
3667 3672
3668 if (!floppy_track_buffer) { 3673 if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3706 if (UFDCS->rawcmd == 1) 3711 if (UFDCS->rawcmd == 1)
3707 UFDCS->rawcmd = 2; 3712 UFDCS->rawcmd = 2;
3708 3713
3709 if (!(mode & FMODE_NDELAY)) { 3714 UDRS->last_checked = 0;
3710 if (mode & (FMODE_READ|FMODE_WRITE)) { 3715 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3711 UDRS->last_checked = 0; 3716 check_disk_change(bdev);
3712 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3717 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
3713 check_disk_change(bdev); 3718 goto out;
3714 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) 3719 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
3715 goto out; 3720 goto out;
3716 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) 3721
3717 goto out; 3722 res = -EROFS;
3718 } 3723
3719 res = -EROFS; 3724 if ((mode & FMODE_WRITE) &&
3720 if ((mode & FMODE_WRITE) && 3725 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
3721 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) 3726 goto out;
3722 goto out; 3727
3723 }
3724 mutex_unlock(&open_lock); 3728 mutex_unlock(&open_lock);
3725 mutex_unlock(&floppy_mutex); 3729 mutex_unlock(&floppy_mutex);
3726 return 0; 3730 return 0;
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3748 return DISK_EVENT_MEDIA_CHANGE; 3752 return DISK_EVENT_MEDIA_CHANGE;
3749 3753
3750 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3754 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3751 lock_fdc(drive, false); 3755 if (lock_fdc(drive))
3756 return -EINTR;
3752 poll_drive(false, 0); 3757 poll_drive(false, 0);
3753 process_fd_request(); 3758 process_fd_request();
3754 } 3759 }
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk)
3847 "VFS: revalidate called on non-open device.\n")) 3852 "VFS: revalidate called on non-open device.\n"))
3848 return -EFAULT; 3853 return -EFAULT;
3849 3854
3850 lock_fdc(drive, false); 3855 res = lock_fdc(drive);
3856 if (res)
3857 return res;
3851 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
3852 test_bit(FD_VERIFY_BIT, &UDRS->flags)); 3859 test_bit(FD_VERIFY_BIT, &UDRS->flags));
3853 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { 3860 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97d573c..64a7b5971b57 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
478 id->ver_id = 0x1; 478 id->ver_id = 0x1;
479 id->vmnt = 0; 479 id->vmnt = 0;
480 id->cgrps = 1; 480 id->cgrps = 1;
481 id->cap = 0x3; 481 id->cap = 0x2;
482 id->dom = 0x1; 482 id->dom = 0x1;
483 483
484 id->ppaf.blk_offset = 0; 484 id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@ static int null_add_dev(void)
707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
709 709
710
711 mutex_lock(&lock); 710 mutex_lock(&lock);
712 list_add_tail(&nullb->list, &nullb_list);
713 nullb->index = nullb_indexes++; 711 nullb->index = nullb_indexes++;
714 mutex_unlock(&lock); 712 mutex_unlock(&lock);
715 713
@@ -743,6 +741,10 @@ static int null_add_dev(void)
743 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
744 742
745 add_disk(disk); 743 add_disk(disk);
744
745 mutex_lock(&lock);
746 list_add_tail(&nullb->list, &nullb_list);
747 mutex_unlock(&lock);
746done: 748done:
747 return 0; 749 return 0;
748 750
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91c39f7..83eb9e6bf8b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@ again:
1873 return err; 1873 return err;
1874} 1874}
1875 1875
1876static int negotiate_mq(struct blkfront_info *info)
1877{
1878 unsigned int backend_max_queues = 0;
1879 int err;
1880 unsigned int i;
1881
1882 BUG_ON(info->nr_rings);
1883
1884 /* Check if backend supports multiple queues. */
1885 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1886 "multi-queue-max-queues", "%u", &backend_max_queues);
1887 if (err < 0)
1888 backend_max_queues = 1;
1889
1890 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1891 /* We need at least one ring. */
1892 if (!info->nr_rings)
1893 info->nr_rings = 1;
1894
1895 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1896 if (!info->rinfo) {
1897 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1898 return -ENOMEM;
1899 }
1900
1901 for (i = 0; i < info->nr_rings; i++) {
1902 struct blkfront_ring_info *rinfo;
1903
1904 rinfo = &info->rinfo[i];
1905 INIT_LIST_HEAD(&rinfo->indirect_pages);
1906 INIT_LIST_HEAD(&rinfo->grants);
1907 rinfo->dev_info = info;
1908 INIT_WORK(&rinfo->work, blkif_restart_queue);
1909 spin_lock_init(&rinfo->ring_lock);
1910 }
1911 return 0;
1912}
1876/** 1913/**
1877 * Entry point to this code when a new device is created. Allocate the basic 1914 * Entry point to this code when a new device is created. Allocate the basic
1878 * structures and the ring buffer for communication with the backend, and 1915 * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1883 const struct xenbus_device_id *id) 1920 const struct xenbus_device_id *id)
1884{ 1921{
1885 int err, vdevice; 1922 int err, vdevice;
1886 unsigned int r_index;
1887 struct blkfront_info *info; 1923 struct blkfront_info *info;
1888 unsigned int backend_max_queues = 0;
1889 1924
1890 /* FIXME: Use dynamic device id if this is not set. */ 1925 /* FIXME: Use dynamic device id if this is not set. */
1891 err = xenbus_scanf(XBT_NIL, dev->nodename, 1926 err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1936 } 1971 }
1937 1972
1938 info->xbdev = dev; 1973 info->xbdev = dev;
1939 /* Check if backend supports multiple queues. */ 1974 err = negotiate_mq(info);
1940 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1975 if (err) {
1941 "multi-queue-max-queues", "%u", &backend_max_queues);
1942 if (err < 0)
1943 backend_max_queues = 1;
1944
1945 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1946 /* We need at least one ring. */
1947 if (!info->nr_rings)
1948 info->nr_rings = 1;
1949
1950 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1951 if (!info->rinfo) {
1952 xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
1953 kfree(info); 1976 kfree(info);
1954 return -ENOMEM; 1977 return err;
1955 }
1956
1957 for (r_index = 0; r_index < info->nr_rings; r_index++) {
1958 struct blkfront_ring_info *rinfo;
1959
1960 rinfo = &info->rinfo[r_index];
1961 INIT_LIST_HEAD(&rinfo->indirect_pages);
1962 INIT_LIST_HEAD(&rinfo->grants);
1963 rinfo->dev_info = info;
1964 INIT_WORK(&rinfo->work, blkif_restart_queue);
1965 spin_lock_init(&rinfo->ring_lock);
1966 } 1978 }
1967 1979
1968 mutex_init(&info->mutex); 1980 mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info)
2123static int blkfront_resume(struct xenbus_device *dev) 2135static int blkfront_resume(struct xenbus_device *dev)
2124{ 2136{
2125 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2137 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2126 int err; 2138 int err = 0;
2127 2139
2128 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2140 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2129 2141
2130 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2142 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131 2143
2144 err = negotiate_mq(info);
2145 if (err)
2146 return err;
2147
2132 err = talk_to_blkback(dev, info); 2148 err = talk_to_blkback(dev, info);
2133 2149
2134 /* 2150 /*
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
1819EXPORT_SYMBOL(get_random_int); 1819EXPORT_SYMBOL(get_random_int);
1820 1820
1821/* 1821/*
1822 * Same as get_random_int(), but returns unsigned long.
1823 */
1824unsigned long get_random_long(void)
1825{
1826 __u32 *hash;
1827 unsigned long ret;
1828
1829 if (arch_get_random_long(&ret))
1830 return ret;
1831
1832 hash = get_cpu_var(get_random_int_hash);
1833
1834 hash[0] += current->pid + jiffies + random_get_entropy();
1835 md5_transform(hash, random_int_secret);
1836 ret = *(unsigned long *)hash;
1837 put_cpu_var(get_random_int_hash);
1838
1839 return ret;
1840}
1841EXPORT_SYMBOL(get_random_long);
1842
1843/*
1822 * randomize_range() returns a start address such that 1844 * randomize_range() returns a start address such that
1823 * 1845 *
1824 * [...... <range> .....] 1846 * [...... <range> .....]
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e3666058..bae4be6501df 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o 43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o 44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o 45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
46obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o 46obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o 47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
48obj-$(CONFIG_ARCH_U300) += clk-u300.o 48obj-$(CONFIG_ARCH_U300) += clk-u300.o
49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65587e8..7b09a265d79f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node,
289 289
290 num_parents = of_clk_get_parent_count(node); 290 num_parents = of_clk_get_parent_count(node);
291 if (num_parents < 0) 291 if (num_parents < 0)
292 return; 292 num_parents = 0;
293 293
294 data = kzalloc(sizeof(*data), GFP_KERNEL); 294 data = kzalloc(sizeof(*data), GFP_KERNEL);
295 if (!data) 295 if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..89e9ca78bb94 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
299 /* Add the virtual cpufreq device */ 299 /* Add the virtual cpufreq device */
300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq", 300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
301 -1, NULL, 0); 301 -1, NULL, 0);
302 if (!cpufreq_dev) 302 if (IS_ERR(cpufreq_dev))
303 pr_warn("unable to register cpufreq device"); 303 pr_warn("unable to register cpufreq device");
304 304
305 return 0; 305 return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa35a5a..3e0b52daa35f 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = {
247 247
248void __init dove_divider_clk_init(struct device_node *np) 248void __init dove_divider_clk_init(struct device_node *np)
249{ 249{
250 void *base; 250 void __iomem *base;
251 251
252 base = of_iomap(np, 0); 252 base = of_iomap(np, 0);
253 if (WARN_ON(!base)) 253 if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e539e9f6..070037a29ea5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = {
3587 .val_bits = 32, 3587 .val_bits = 32,
3588 .max_register = 0x1fc0, 3588 .max_register = 0x1fc0,
3589 .fast_io = true, 3589 .fast_io = true,
3590 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3591}; 3590};
3592 3591
3593static const struct qcom_cc_desc gcc_apq8084_desc = { 3592static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae881d6a..dd5402bac620 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = {
3005 .val_bits = 32, 3005 .val_bits = 32,
3006 .max_register = 0x3e40, 3006 .max_register = 0x3e40,
3007 .fast_io = true, 3007 .fast_io = true,
3008 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3009}; 3008};
3010 3009
3011static const struct qcom_cc_desc gcc_ipq806x_desc = { 3010static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14dfec4..ad413036f7c7 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = {
2702 .val_bits = 32, 2702 .val_bits = 32,
2703 .max_register = 0x363c, 2703 .max_register = 0x363c,
2704 .fast_io = true, 2704 .fast_io = true,
2705 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2706}; 2705};
2707 2706
2708static const struct qcom_cc_desc gcc_msm8660_desc = { 2707static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d7d0ef..8cc9b2868b41 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = {
3336 .val_bits = 32, 3336 .val_bits = 32,
3337 .max_register = 0x80000, 3337 .max_register = 0x80000,
3338 .fast_io = true, 3338 .fast_io = true,
3339 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3340}; 3339};
3341 3340
3342static const struct qcom_cc_desc gcc_msm8916_desc = { 3341static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e32d44..983dd7dc89a7 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
3468 .val_bits = 32, 3468 .val_bits = 32,
3469 .max_register = 0x3660, 3469 .max_register = 0x3660,
3470 .fast_io = true, 3470 .fast_io = true,
3471 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3472}; 3471};
3473 3472
3474static const struct regmap_config gcc_apq8064_regmap_config = { 3473static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = {
3477 .val_bits = 32, 3476 .val_bits = 32,
3478 .max_register = 0x3880, 3477 .max_register = 0x3880,
3479 .fast_io = true, 3478 .fast_io = true,
3480 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3481}; 3479};
3482 3480
3483static const struct qcom_cc_desc gcc_msm8960_desc = { 3481static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d618e34..335952db309b 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = {
2680 .val_bits = 32, 2680 .val_bits = 32,
2681 .max_register = 0x1fc0, 2681 .max_register = 0x1fc0,
2682 .fast_io = true, 2682 .fast_io = true,
2683 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2684}; 2683};
2685 2684
2686static const struct qcom_cc_desc gcc_msm8974_desc = { 2685static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fadd5f7..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = {
419 .val_bits = 32, 419 .val_bits = 32,
420 .max_register = 0xfc, 420 .max_register = 0xfc,
421 .fast_io = true, 421 .fast_io = true,
422 .val_format_endian = REGMAP_ENDIAN_LITTLE,
423}; 422};
424 423
425static const struct qcom_cc_desc lcc_ipq806x_desc = { 424static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0ea1b8..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = {
524 .val_bits = 32, 524 .val_bits = 32,
525 .max_register = 0xfc, 525 .max_register = 0xfc,
526 .fast_io = true, 526 .fast_io = true,
527 .val_format_endian = REGMAP_ENDIAN_LITTLE,
528}; 527};
529 528
530static const struct qcom_cc_desc lcc_msm8960_desc = { 529static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fda8a0f..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = {
3368 .val_bits = 32, 3368 .val_bits = 32,
3369 .max_register = 0x5104, 3369 .max_register = 0x5104,
3370 .fast_io = true, 3370 .fast_io = true,
3371 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3372}; 3371};
3373 3372
3374static const struct qcom_cc_desc mmcc_apq8084_desc = { 3373static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048d3b9d..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
3029 .val_bits = 32, 3029 .val_bits = 32,
3030 .max_register = 0x334, 3030 .max_register = 0x334,
3031 .fast_io = true, 3031 .fast_io = true,
3032 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3033}; 3032};
3034 3033
3035static const struct regmap_config mmcc_apq8064_regmap_config = { 3034static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = {
3038 .val_bits = 32, 3037 .val_bits = 32,
3039 .max_register = 0x350, 3038 .max_register = 0x350,
3040 .fast_io = true, 3039 .fast_io = true,
3041 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3042}; 3040};
3043 3041
3044static const struct qcom_cc_desc mmcc_msm8960_desc = { 3042static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed93669..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = {
2594 .val_bits = 32, 2594 .val_bits = 32,
2595 .max_register = 0x5104, 2595 .max_register = 0x5104,
2596 .fast_io = true, 2596 .fast_io = true,
2597 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2598}; 2597};
2599 2598
2600static const struct qcom_cc_desc mmcc_msm8974_desc = { 2599static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce98033fbb..bc7fbac83ab7 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
136PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; 136PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; 137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
138 138
139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { 139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
224 RK2928_CLKGATE_CON(2), 2, GFLAGS), 224 RK2928_CLKGATE_CON(2), 2, GFLAGS),
225 225
226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, 226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
227 RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, 227 RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
228 RK2928_CLKGATE_CON(1), 0, GFLAGS), 228 RK2928_CLKGATE_CON(1), 0, GFLAGS),
229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, 229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
230 RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, 230 RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
231 RK2928_CLKGATE_CON(1), 1, GFLAGS), 231 RK2928_CLKGATE_CON(1), 1, GFLAGS),
232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, 232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
233 RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, 233 RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
234 RK2928_CLKGATE_CON(2), 4, GFLAGS), 234 RK2928_CLKGATE_CON(2), 4, GFLAGS),
235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, 235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
236 RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, 236 RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
237 RK2928_CLKGATE_CON(2), 5, GFLAGS), 237 RK2928_CLKGATE_CON(2), 5, GFLAGS),
238 238
239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, 239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
243 RK2928_CLKGATE_CON(1), 8, GFLAGS), 243 RK2928_CLKGATE_CON(1), 8, GFLAGS),
244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, 244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
245 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 245 RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
246 RK2928_CLKGATE_CON(1), 8, GFLAGS), 246 RK2928_CLKGATE_CON(1), 10, GFLAGS),
247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, 247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
248 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 248 RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
249 RK2928_CLKGATE_CON(1), 8, GFLAGS), 249 RK2928_CLKGATE_CON(1), 12, GFLAGS),
250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, 250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
251 RK2928_CLKSEL_CON(17), 0, 251 RK2928_CLKSEL_CON(17), 0,
252 RK2928_CLKGATE_CON(1), 9, GFLAGS, 252 RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
279 RK2928_CLKGATE_CON(3), 2, GFLAGS), 279 RK2928_CLKGATE_CON(3), 2, GFLAGS),
280 280
281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, 281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
282 RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, 282 RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
283 RK2928_CLKGATE_CON(2), 11, GFLAGS), 283 RK2928_CLKGATE_CON(2), 11, GFLAGS),
284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, 284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), 285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
286 286
287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, 287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
288 RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, 288 RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
289 RK2928_CLKGATE_CON(2), 13, GFLAGS), 289 RK2928_CLKGATE_CON(2), 13, GFLAGS),
290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, 290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), 291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
344 RK2928_CLKGATE_CON(10), 5, GFLAGS), 344 RK2928_CLKGATE_CON(10), 5, GFLAGS),
345 345
346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, 346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), 347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, 348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), 349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
350 350
351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, 351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
352 RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, 352 RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
353 RK2928_CLKGATE_CON(2), 6, GFLAGS), 353 RK2928_CLKGATE_CON(2), 6, GFLAGS),
354 354
355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, 355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede522269..21f3ea909fab 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), 780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
781 781
782 /* pclk_pd_alive gates */ 782 /* pclk_pd_alive gates */
783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), 783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), 784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), 785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), 786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), 787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), 788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), 789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
790 790
791 /* 791 /*
792 * pclk_vio gates 792 * pclk_vio gates
@@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), 796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
797 797
798 /* pclk_pd_pmu gates */ 798 /* pclk_pd_pmu gates */
799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), 799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), 800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), 801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), 803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
805 805
806 /* timer gates */ 806 /* timer gates */
807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), 807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f35d45c..74e7544f861b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
450 struct emc_timing *timing = tegra->timings + (i++); 450 struct emc_timing *timing = tegra->timings + (i++);
451 451
452 err = load_one_timing_from_dt(tegra, timing, child); 452 err = load_one_timing_from_dt(tegra, timing, child);
453 if (err) 453 if (err) {
454 of_node_put(child);
454 return err; 455 return err;
456 }
455 457
456 timing->ram_code = ram_code; 458 timing->ram_code = ram_code;
457 } 459 }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
499 * fuses until the apbmisc driver is loaded. 501 * fuses until the apbmisc driver is loaded.
500 */ 502 */
501 err = load_timings_from_dt(tegra, node, node_ram_code); 503 err = load_timings_from_dt(tegra, node, node_ram_code);
504 of_node_put(node);
502 if (err) 505 if (err)
503 return ERR_PTR(err); 506 return ERR_PTR(err);
504 of_node_put(node);
505 break; 507 break;
506 } 508 }
507 509
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce0738ee76..62ea38187b71 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@ enum clk_id {
11 tegra_clk_afi, 11 tegra_clk_afi,
12 tegra_clk_amx, 12 tegra_clk_amx,
13 tegra_clk_amx1, 13 tegra_clk_amx1,
14 tegra_clk_apb2ape,
14 tegra_clk_apbdma, 15 tegra_clk_apbdma,
15 tegra_clk_apbif, 16 tegra_clk_apbif,
16 tegra_clk_ape, 17 tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfab30b3..6ac3f843e7ca 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ 86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
87 PLLE_SS_CNTL_SSC_BYP) 87 PLLE_SS_CNTL_SSC_BYP)
88#define PLLE_SS_MAX_MASK 0x1ff 88#define PLLE_SS_MAX_MASK 0x1ff
89#define PLLE_SS_MAX_VAL 0x25 89#define PLLE_SS_MAX_VAL_TEGRA114 0x25
90#define PLLE_SS_MAX_VAL_TEGRA210 0x21
90#define PLLE_SS_INC_MASK (0xff << 16) 91#define PLLE_SS_INC_MASK (0xff << 16)
91#define PLLE_SS_INC_VAL (0x1 << 16) 92#define PLLE_SS_INC_VAL (0x1 << 16)
92#define PLLE_SS_INCINTRV_MASK (0x3f << 24) 93#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
93#define PLLE_SS_INCINTRV_VAL (0x20 << 24) 94#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
95#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
94#define PLLE_SS_COEFFICIENTS_MASK \ 96#define PLLE_SS_COEFFICIENTS_MASK \
95 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) 97 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
96#define PLLE_SS_COEFFICIENTS_VAL \ 98#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
97 (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) 99 (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
100 PLLE_SS_INCINTRV_VAL_TEGRA114)
101#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
102 (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
103 PLLE_SS_INCINTRV_VAL_TEGRA210)
98 104
99#define PLLE_AUX_PLLP_SEL BIT(2) 105#define PLLE_AUX_PLLP_SEL BIT(2)
100#define PLLE_AUX_USE_LOCKDET BIT(3) 106#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
880static int clk_plle_enable(struct clk_hw *hw) 886static int clk_plle_enable(struct clk_hw *hw)
881{ 887{
882 struct tegra_clk_pll *pll = to_clk_pll(hw); 888 struct tegra_clk_pll *pll = to_clk_pll(hw);
883 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 889 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
884 struct tegra_clk_pll_freq_table sel; 890 struct tegra_clk_pll_freq_table sel;
885 u32 val; 891 u32 val;
886 int err; 892 int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1378 u32 val; 1384 u32 val;
1379 int ret; 1385 int ret;
1380 unsigned long flags = 0; 1386 unsigned long flags = 0;
1381 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 1387 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
1382 1388
1383 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 1389 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
1384 return -EINVAL; 1390 return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1401 val |= PLLE_MISC_IDDQ_SW_CTRL; 1407 val |= PLLE_MISC_IDDQ_SW_CTRL;
1402 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 1408 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
1403 val |= PLLE_MISC_PLLE_PTS; 1409 val |= PLLE_MISC_PLLE_PTS;
1404 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 1410 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
1405 pll_writel_misc(val, pll); 1411 pll_writel_misc(val, pll);
1406 udelay(5); 1412 udelay(5);
1407 1413
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1428 val = pll_readl(PLLE_SS_CTRL, pll); 1434 val = pll_readl(PLLE_SS_CTRL, pll);
1429 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 1435 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
1430 val &= ~PLLE_SS_COEFFICIENTS_MASK; 1436 val &= ~PLLE_SS_COEFFICIENTS_MASK;
1431 val |= PLLE_SS_COEFFICIENTS_VAL; 1437 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
1432 pll_writel(val, PLLE_SS_CTRL, pll); 1438 pll_writel(val, PLLE_SS_CTRL, pll);
1433 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 1439 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
1434 pll_writel(val, PLLE_SS_CTRL, pll); 1440 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2012 struct tegra_clk_pll *pll = to_clk_pll(hw); 2018 struct tegra_clk_pll *pll = to_clk_pll(hw);
2013 struct tegra_clk_pll_freq_table sel; 2019 struct tegra_clk_pll_freq_table sel;
2014 u32 val; 2020 u32 val;
2015 int ret; 2021 int ret = 0;
2016 unsigned long flags = 0; 2022 unsigned long flags = 0;
2017 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 2023 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2018 2024
2019 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 2025 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
2020 return -EINVAL; 2026 return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2022 if (pll->lock) 2028 if (pll->lock)
2023 spin_lock_irqsave(pll->lock, flags); 2029 spin_lock_irqsave(pll->lock, flags);
2024 2030
2031 val = pll_readl(pll->params->aux_reg, pll);
2032 if (val & PLLE_AUX_SEQ_ENABLE)
2033 goto out;
2034
2025 val = pll_readl_base(pll); 2035 val = pll_readl_base(pll);
2026 val &= ~BIT(30); /* Disable lock override */ 2036 val &= ~BIT(30); /* Disable lock override */
2027 pll_writel_base(val, pll); 2037 pll_writel_base(val, pll);
2028 2038
2029 val = pll_readl(pll->params->aux_reg, pll);
2030 val |= PLLE_AUX_ENABLE_SWCTL;
2031 val &= ~PLLE_AUX_SEQ_ENABLE;
2032 pll_writel(val, pll->params->aux_reg, pll);
2033 udelay(1);
2034
2035 val = pll_readl_misc(pll); 2039 val = pll_readl_misc(pll);
2036 val |= PLLE_MISC_LOCK_ENABLE; 2040 val |= PLLE_MISC_LOCK_ENABLE;
2037 val |= PLLE_MISC_IDDQ_SW_CTRL; 2041 val |= PLLE_MISC_IDDQ_SW_CTRL;
2038 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 2042 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
2039 val |= PLLE_MISC_PLLE_PTS; 2043 val |= PLLE_MISC_PLLE_PTS;
2040 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 2044 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
2041 pll_writel_misc(val, pll); 2045 pll_writel_misc(val, pll);
2042 udelay(5); 2046 udelay(5);
2043 2047
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2067 val = pll_readl(PLLE_SS_CTRL, pll); 2071 val = pll_readl(PLLE_SS_CTRL, pll);
2068 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 2072 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
2069 val &= ~PLLE_SS_COEFFICIENTS_MASK; 2073 val &= ~PLLE_SS_COEFFICIENTS_MASK;
2070 val |= PLLE_SS_COEFFICIENTS_VAL; 2074 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
2071 pll_writel(val, PLLE_SS_CTRL, pll); 2075 pll_writel(val, PLLE_SS_CTRL, pll);
2072 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 2076 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
2073 pll_writel(val, PLLE_SS_CTRL, pll); 2077 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
2104 if (pll->lock) 2108 if (pll->lock)
2105 spin_lock_irqsave(pll->lock, flags); 2109 spin_lock_irqsave(pll->lock, flags);
2106 2110
2111 /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
2112 val = pll_readl(pll->params->aux_reg, pll);
2113 if (val & PLLE_AUX_SEQ_ENABLE)
2114 goto out;
2115
2107 val = pll_readl_base(pll); 2116 val = pll_readl_base(pll);
2108 val &= ~PLLE_BASE_ENABLE; 2117 val &= ~PLLE_BASE_ENABLE;
2109 pll_writel_base(val, pll); 2118 pll_writel_base(val, pll);
2110 2119
2120 val = pll_readl(pll->params->aux_reg, pll);
2121 val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
2122 pll_writel(val, pll->params->aux_reg, pll);
2123
2111 val = pll_readl_misc(pll); 2124 val = pll_readl_misc(pll);
2112 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; 2125 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
2113 pll_writel_misc(val, pll); 2126 pll_writel_misc(val, pll);
2114 udelay(1); 2127 udelay(1);
2115 2128
2129out:
2116 if (pll->lock) 2130 if (pll->lock)
2117 spin_unlock_irqrestore(pll->lock, flags); 2131 spin_unlock_irqrestore(pll->lock, flags);
2118} 2132}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a888a6..ea2b9cbf9e70 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), 773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), 774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), 775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
776 MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), 776 MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), 777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), 778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), 779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), 782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), 783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), 784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
785 MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), 785 I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), 786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), 787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), 788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), 829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), 830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), 831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
832 GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
832}; 833};
833 834
834static struct tegra_periph_init_data div_clks[] = { 835static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20e3af6..474de0f0c26d 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
67 "pll_p", "pll_p_out4", "unused", 67 "pll_p", "pll_p_out4", "unused",
68 "unused", "pll_x", "pll_x_out0" }; 68 "unused", "pll_x", "pll_x_out0" };
69 69
70const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { 70static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
71 .gen = gen4, 71 .gen = gen4,
72 .sclk_parents = sclk_parents, 72 .sclk_parents = sclk_parents,
73 .cclk_g_parents = cclk_g_parents, 73 .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
93 "unused", "unused", "unused", "unused", 93 "unused", "unused", "unused", "unused",
94 "dfllCPU_out" }; 94 "dfllCPU_out" };
95 95
96const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { 96static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
97 .gen = gen5, 97 .gen = gen5,
98 .sclk_parents = sclk_parents_gen5, 98 .sclk_parents = sclk_parents_gen5,
99 .cclk_g_parents = cclk_g_parents_gen5, 99 .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
171 *dt_clk = clk; 171 *dt_clk = clk;
172} 172}
173 173
174void __init tegra_super_clk_init(void __iomem *clk_base, 174static void __init tegra_super_clk_init(void __iomem *clk_base,
175 void __iomem *pmc_base, 175 void __iomem *pmc_base,
176 struct tegra_clk *tegra_clks, 176 struct tegra_clk *tegra_clks,
177 struct tegra_clk_pll_params *params, 177 struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c44ea83..637041fd53ad 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
59#define PLLC3_MISC3 0x50c 59#define PLLC3_MISC3 0x50c
60 60
61#define PLLM_BASE 0x90 61#define PLLM_BASE 0x90
62#define PLLM_MISC0 0x9c
63#define PLLM_MISC1 0x98 62#define PLLM_MISC1 0x98
63#define PLLM_MISC2 0x9c
64#define PLLP_BASE 0xa0 64#define PLLP_BASE 0xa0
65#define PLLP_MISC0 0xac 65#define PLLP_MISC0 0xac
66#define PLLP_MISC1 0x680 66#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
99#define PLLC4_MISC0 0x5a8 99#define PLLC4_MISC0 0x5a8
100#define PLLC4_OUT 0x5e4 100#define PLLC4_OUT 0x5e4
101#define PLLMB_BASE 0x5e8 101#define PLLMB_BASE 0x5e8
102#define PLLMB_MISC0 0x5ec 102#define PLLMB_MISC1 0x5ec
103#define PLLA1_BASE 0x6a4 103#define PLLA1_BASE 0x6a4
104#define PLLA1_MISC0 0x6a8 104#define PLLA1_MISC0 0x6a8
105#define PLLA1_MISC1 0x6ac 105#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
243}; 243};
244 244
245static const char *mux_pllmcp_clkm[] = { 245static const char *mux_pllmcp_clkm[] = {
246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
247 "pll_p",
247}; 248};
248#define mux_pllmcp_clkm_idx NULL 249#define mux_pllmcp_clkm_idx NULL
249 250
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
367/* PLLMB */ 368/* PLLMB */
368#define PLLMB_BASE_LOCK (1 << 27) 369#define PLLMB_BASE_LOCK (1 << 27)
369 370
370#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) 371#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
371#define PLLMB_MISC0_IDDQ (1 << 17) 372#define PLLMB_MISC1_IDDQ (1 << 17)
372#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) 373#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
373 374
374#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 375#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
375#define PLLMB_MISC0_WRITE_MASK 0x0007ffff 376#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
376 377
377/* PLLP */ 378/* PLLP */
378#define PLLP_BASE_OVERRIDE (1 << 28) 379#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
457 PLLCX_MISC3_WRITE_MASK); 458 PLLCX_MISC3_WRITE_MASK);
458} 459}
459 460
460void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) 461static void tegra210_pllcx_set_defaults(const char *name,
462 struct tegra_clk_pll *pllcx)
461{ 463{
462 pllcx->params->defaults_set = true; 464 pllcx->params->defaults_set = true;
463 465
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
482 udelay(1); 484 udelay(1);
483} 485}
484 486
485void _pllc_set_defaults(struct tegra_clk_pll *pllcx) 487static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
486{ 488{
487 tegra210_pllcx_set_defaults("PLL_C", pllcx); 489 tegra210_pllcx_set_defaults("PLL_C", pllcx);
488} 490}
489 491
490void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) 492static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
491{ 493{
492 tegra210_pllcx_set_defaults("PLL_C2", pllcx); 494 tegra210_pllcx_set_defaults("PLL_C2", pllcx);
493} 495}
494 496
495void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) 497static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
496{ 498{
497 tegra210_pllcx_set_defaults("PLL_C3", pllcx); 499 tegra210_pllcx_set_defaults("PLL_C3", pllcx);
498} 500}
499 501
500void _plla1_set_defaults(struct tegra_clk_pll *pllcx) 502static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
501{ 503{
502 tegra210_pllcx_set_defaults("PLL_A1", pllcx); 504 tegra210_pllcx_set_defaults("PLL_A1", pllcx);
503} 505}
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
507 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. 509 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
508 * Fractional SDM is allowed to provide exact audio rates. 510 * Fractional SDM is allowed to provide exact audio rates.
509 */ 511 */
510void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) 512static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
511{ 513{
512 u32 mask; 514 u32 mask;
513 u32 val = readl_relaxed(clk_base + plla->params->base_reg); 515 u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
559 * PLLD 561 * PLLD
560 * PLL with fractional SDM. 562 * PLL with fractional SDM.
561 */ 563 */
562void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) 564static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
563{ 565{
564 u32 val; 566 u32 val;
565 u32 mask = 0xffff; 567 u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
698 udelay(1); 700 udelay(1);
699} 701}
700 702
701void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) 703static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
702{ 704{
703 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, 705 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
704 PLLD2_MISC1_CFG_DEFAULT_VALUE, 706 PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
706 PLLD2_MISC3_CTRL2_DEFAULT_VALUE); 708 PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
707} 709}
708 710
709void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) 711static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
710{ 712{
711 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, 713 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
712 PLLDP_MISC1_CFG_DEFAULT_VALUE, 714 PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
719 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. 721 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
720 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. 722 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
721 */ 723 */
722void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) 724static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
723{ 725{
724 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); 726 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
725} 727}
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
728 * PLLRE 730 * PLLRE
729 * VCO is exposed to the clock tree directly along with post-divider output 731 * VCO is exposed to the clock tree directly along with post-divider output
730 */ 732 */
731void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) 733static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
732{ 734{
733 u32 mask; 735 u32 mask;
734 u32 val = readl_relaxed(clk_base + pllre->params->base_reg); 736 u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
780{ 782{
781 unsigned long input_rate; 783 unsigned long input_rate;
782 784
783 if (!IS_ERR_OR_NULL(hw->clk)) { 785 /* cf rate */
786 if (!IS_ERR_OR_NULL(hw->clk))
784 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 787 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
785 /* cf rate */ 788 else
786 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
787 } else {
788 input_rate = 38400000; 789 input_rate = 38400000;
789 } 790
791 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
790 792
791 switch (input_rate) { 793 switch (input_rate) {
792 case 12000000: 794 case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
841 PLLX_MISC5_WRITE_MASK); 843 PLLX_MISC5_WRITE_MASK);
842} 844}
843 845
844void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) 846static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
845{ 847{
846 u32 val; 848 u32 val;
847 u32 step_a, step_b; 849 u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
901} 903}
902 904
903/* PLLMB */ 905/* PLLMB */
904void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) 906static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
905{ 907{
906 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); 908 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
907 909
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
914 * PLL is ON: check if defaults already set, then set those 916 * PLL is ON: check if defaults already set, then set those
915 * that can be updated in flight. 917 * that can be updated in flight.
916 */ 918 */
917 val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); 919 val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
918 mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; 920 mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
919 _pll_misc_chk_default(clk_base, pllmb->params, 0, val, 921 _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
920 ~mask & PLLMB_MISC0_WRITE_MASK); 922 ~mask & PLLMB_MISC1_WRITE_MASK);
921 923
922 /* Enable lock detect */ 924 /* Enable lock detect */
923 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); 925 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
924 val &= ~mask; 926 val &= ~mask;
925 val |= PLLMB_MISC0_DEFAULT_VALUE & mask; 927 val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
926 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); 928 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
927 udelay(1); 929 udelay(1);
928 930
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
930 } 932 }
931 933
932 /* set IDDQ, enable lock detect */ 934 /* set IDDQ, enable lock detect */
933 writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, 935 writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
934 clk_base + pllmb->params->ext_misc_reg[0]); 936 clk_base + pllmb->params->ext_misc_reg[0]);
935 udelay(1); 937 udelay(1);
936} 938}
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
960 ~mask & PLLP_MISC1_WRITE_MASK); 962 ~mask & PLLP_MISC1_WRITE_MASK);
961} 963}
962 964
963void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) 965static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
964{ 966{
965 u32 mask; 967 u32 mask;
966 u32 val = readl_relaxed(clk_base + pllp->params->base_reg); 968 u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
1022 ~mask & PLLU_MISC1_WRITE_MASK); 1024 ~mask & PLLU_MISC1_WRITE_MASK);
1023} 1025}
1024 1026
1025void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) 1027static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
1026{ 1028{
1027 u32 val = readl_relaxed(clk_base + pllu->params->base_reg); 1029 u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
1028 1030
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
1212 cfg->m *= PLL_SDM_COEFF; 1214 cfg->m *= PLL_SDM_COEFF;
1213} 1215}
1214 1216
1215unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, 1217static unsigned long
1216 unsigned long parent_rate) 1218tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
1219 unsigned long parent_rate)
1217{ 1220{
1218 unsigned long vco_min = params->vco_min; 1221 unsigned long vco_min = params->vco_min;
1219 1222
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
1386 .mdiv_default = 3, 1389 .mdiv_default = 3,
1387 .div_nmp = &pllc_nmp, 1390 .div_nmp = &pllc_nmp,
1388 .freq_table = pll_cx_freq_table, 1391 .freq_table = pll_cx_freq_table,
1389 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1392 .flags = TEGRA_PLL_USE_LOCK,
1390 .set_defaults = _pllc_set_defaults, 1393 .set_defaults = _pllc_set_defaults,
1391 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1394 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1392}; 1395};
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
1425 .ext_misc_reg[2] = PLLC2_MISC2, 1428 .ext_misc_reg[2] = PLLC2_MISC2,
1426 .ext_misc_reg[3] = PLLC2_MISC3, 1429 .ext_misc_reg[3] = PLLC2_MISC3,
1427 .freq_table = pll_cx_freq_table, 1430 .freq_table = pll_cx_freq_table,
1428 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1431 .flags = TEGRA_PLL_USE_LOCK,
1429 .set_defaults = _pllc2_set_defaults, 1432 .set_defaults = _pllc2_set_defaults,
1430 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1433 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1431}; 1434};
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
1455 .ext_misc_reg[2] = PLLC3_MISC2, 1458 .ext_misc_reg[2] = PLLC3_MISC2,
1456 .ext_misc_reg[3] = PLLC3_MISC3, 1459 .ext_misc_reg[3] = PLLC3_MISC3,
1457 .freq_table = pll_cx_freq_table, 1460 .freq_table = pll_cx_freq_table,
1458 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1461 .flags = TEGRA_PLL_USE_LOCK,
1459 .set_defaults = _pllc3_set_defaults, 1462 .set_defaults = _pllc3_set_defaults,
1460 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1463 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1461}; 1464};
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1505 .base_reg = PLLC4_BASE, 1508 .base_reg = PLLC4_BASE,
1506 .misc_reg = PLLC4_MISC0, 1509 .misc_reg = PLLC4_MISC0,
1507 .lock_mask = PLL_BASE_LOCK, 1510 .lock_mask = PLL_BASE_LOCK,
1508 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1509 .lock_delay = 300, 1511 .lock_delay = 300,
1510 .max_p = PLL_QLIN_PDIV_MAX, 1512 .max_p = PLL_QLIN_PDIV_MAX,
1511 .ext_misc_reg[0] = PLLC4_MISC0, 1513 .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1517 .div_nmp = &pllss_nmp, 1519 .div_nmp = &pllss_nmp,
1518 .freq_table = pll_c4_vco_freq_table, 1520 .freq_table = pll_c4_vco_freq_table,
1519 .set_defaults = tegra210_pllc4_set_defaults, 1521 .set_defaults = tegra210_pllc4_set_defaults,
1520 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1522 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1521 TEGRA_PLL_VCO_OUT,
1522 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1523 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1523}; 1524};
1524 1525
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
1559 .vco_min = 800000000, 1560 .vco_min = 800000000,
1560 .vco_max = 1866000000, 1561 .vco_max = 1866000000,
1561 .base_reg = PLLM_BASE, 1562 .base_reg = PLLM_BASE,
1562 .misc_reg = PLLM_MISC1, 1563 .misc_reg = PLLM_MISC2,
1563 .lock_mask = PLL_BASE_LOCK, 1564 .lock_mask = PLL_BASE_LOCK,
1564 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, 1565 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
1565 .lock_delay = 300, 1566 .lock_delay = 300,
1566 .iddq_reg = PLLM_MISC0, 1567 .iddq_reg = PLLM_MISC2,
1567 .iddq_bit_idx = PLLM_IDDQ_BIT, 1568 .iddq_bit_idx = PLLM_IDDQ_BIT,
1568 .max_p = PLL_QLIN_PDIV_MAX, 1569 .max_p = PLL_QLIN_PDIV_MAX,
1569 .ext_misc_reg[0] = PLLM_MISC0, 1570 .ext_misc_reg[0] = PLLM_MISC2,
1570 .ext_misc_reg[0] = PLLM_MISC1, 1571 .ext_misc_reg[1] = PLLM_MISC1,
1571 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1572 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1572 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1573 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1573 .div_nmp = &pllm_nmp, 1574 .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
1586 .vco_min = 800000000, 1587 .vco_min = 800000000,
1587 .vco_max = 1866000000, 1588 .vco_max = 1866000000,
1588 .base_reg = PLLMB_BASE, 1589 .base_reg = PLLMB_BASE,
1589 .misc_reg = PLLMB_MISC0, 1590 .misc_reg = PLLMB_MISC1,
1590 .lock_mask = PLL_BASE_LOCK, 1591 .lock_mask = PLL_BASE_LOCK,
1591 .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
1592 .lock_delay = 300, 1592 .lock_delay = 300,
1593 .iddq_reg = PLLMB_MISC0, 1593 .iddq_reg = PLLMB_MISC1,
1594 .iddq_bit_idx = PLLMB_IDDQ_BIT, 1594 .iddq_bit_idx = PLLMB_IDDQ_BIT,
1595 .max_p = PLL_QLIN_PDIV_MAX, 1595 .max_p = PLL_QLIN_PDIV_MAX,
1596 .ext_misc_reg[0] = PLLMB_MISC0, 1596 .ext_misc_reg[0] = PLLMB_MISC1,
1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1598 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1598 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1599 .div_nmp = &pllm_nmp, 1599 .div_nmp = &pllm_nmp,
1600 .freq_table = pll_m_freq_table, 1600 .freq_table = pll_m_freq_table,
1601 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1601 .flags = TEGRA_PLL_USE_LOCK,
1602 .set_defaults = tegra210_pllmb_set_defaults, 1602 .set_defaults = tegra210_pllmb_set_defaults,
1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1604}; 1604};
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1671 .base_reg = PLLRE_BASE, 1671 .base_reg = PLLRE_BASE,
1672 .misc_reg = PLLRE_MISC0, 1672 .misc_reg = PLLRE_MISC0,
1673 .lock_mask = PLLRE_MISC_LOCK, 1673 .lock_mask = PLLRE_MISC_LOCK,
1674 .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
1675 .lock_delay = 300, 1674 .lock_delay = 300,
1676 .max_p = PLL_QLIN_PDIV_MAX, 1675 .max_p = PLL_QLIN_PDIV_MAX,
1677 .ext_misc_reg[0] = PLLRE_MISC0, 1676 .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1681 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1680 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1682 .div_nmp = &pllre_nmp, 1681 .div_nmp = &pllre_nmp,
1683 .freq_table = pll_re_vco_freq_table, 1682 .freq_table = pll_re_vco_freq_table,
1684 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | 1683 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
1685 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1686 .set_defaults = tegra210_pllre_set_defaults, 1684 .set_defaults = tegra210_pllre_set_defaults,
1687 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1685 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1688}; 1686};
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
1712 .base_reg = PLLP_BASE, 1710 .base_reg = PLLP_BASE,
1713 .misc_reg = PLLP_MISC0, 1711 .misc_reg = PLLP_MISC0,
1714 .lock_mask = PLL_BASE_LOCK, 1712 .lock_mask = PLL_BASE_LOCK,
1715 .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
1716 .lock_delay = 300, 1713 .lock_delay = 300,
1717 .iddq_reg = PLLP_MISC0, 1714 .iddq_reg = PLLP_MISC0,
1718 .iddq_bit_idx = PLLXP_IDDQ_BIT, 1715 .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
1721 .div_nmp = &pllp_nmp, 1718 .div_nmp = &pllp_nmp,
1722 .freq_table = pll_p_freq_table, 1719 .freq_table = pll_p_freq_table,
1723 .fixed_rate = 408000000, 1720 .fixed_rate = 408000000,
1724 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | 1721 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1725 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1726 .set_defaults = tegra210_pllp_set_defaults, 1722 .set_defaults = tegra210_pllp_set_defaults,
1727 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1723 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1728}; 1724};
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
1750 .ext_misc_reg[2] = PLLA1_MISC2, 1746 .ext_misc_reg[2] = PLLA1_MISC2,
1751 .ext_misc_reg[3] = PLLA1_MISC3, 1747 .ext_misc_reg[3] = PLLA1_MISC3,
1752 .freq_table = pll_cx_freq_table, 1748 .freq_table = pll_cx_freq_table,
1753 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1749 .flags = TEGRA_PLL_USE_LOCK,
1754 .set_defaults = _plla1_set_defaults, 1750 .set_defaults = _plla1_set_defaults,
1755 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1751 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1756}; 1752};
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
1787 .base_reg = PLLA_BASE, 1783 .base_reg = PLLA_BASE,
1788 .misc_reg = PLLA_MISC0, 1784 .misc_reg = PLLA_MISC0,
1789 .lock_mask = PLL_BASE_LOCK, 1785 .lock_mask = PLL_BASE_LOCK,
1790 .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
1791 .lock_delay = 300, 1786 .lock_delay = 300,
1792 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1787 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1793 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1788 .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
1802 .ext_misc_reg[1] = PLLA_MISC1, 1797 .ext_misc_reg[1] = PLLA_MISC1,
1803 .ext_misc_reg[2] = PLLA_MISC2, 1798 .ext_misc_reg[2] = PLLA_MISC2,
1804 .freq_table = pll_a_freq_table, 1799 .freq_table = pll_a_freq_table,
1805 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | 1800 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
1806 TEGRA_PLL_HAS_LOCK_ENABLE,
1807 .set_defaults = tegra210_plla_set_defaults, 1801 .set_defaults = tegra210_plla_set_defaults,
1808 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1802 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1809 .set_gain = tegra210_clk_pll_set_gain, 1803 .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
1836 .base_reg = PLLD_BASE, 1830 .base_reg = PLLD_BASE,
1837 .misc_reg = PLLD_MISC0, 1831 .misc_reg = PLLD_MISC0,
1838 .lock_mask = PLL_BASE_LOCK, 1832 .lock_mask = PLL_BASE_LOCK,
1839 .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
1840 .lock_delay = 1000, 1833 .lock_delay = 1000,
1841 .iddq_reg = PLLD_MISC0, 1834 .iddq_reg = PLLD_MISC0,
1842 .iddq_bit_idx = PLLD_IDDQ_BIT, 1835 .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
1850 .ext_misc_reg[0] = PLLD_MISC0, 1843 .ext_misc_reg[0] = PLLD_MISC0,
1851 .ext_misc_reg[1] = PLLD_MISC1, 1844 .ext_misc_reg[1] = PLLD_MISC1,
1852 .freq_table = pll_d_freq_table, 1845 .freq_table = pll_d_freq_table,
1853 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1846 .flags = TEGRA_PLL_USE_LOCK,
1854 .mdiv_default = 1, 1847 .mdiv_default = 1,
1855 .set_defaults = tegra210_plld_set_defaults, 1848 .set_defaults = tegra210_plld_set_defaults,
1856 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1849 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
1876 .base_reg = PLLD2_BASE, 1869 .base_reg = PLLD2_BASE,
1877 .misc_reg = PLLD2_MISC0, 1870 .misc_reg = PLLD2_MISC0,
1878 .lock_mask = PLL_BASE_LOCK, 1871 .lock_mask = PLL_BASE_LOCK,
1879 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1880 .lock_delay = 300, 1872 .lock_delay = 300,
1881 .iddq_reg = PLLD2_BASE, 1873 .iddq_reg = PLLD2_BASE,
1882 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1874 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
1897 .mdiv_default = 1, 1889 .mdiv_default = 1,
1898 .freq_table = tegra210_pll_d2_freq_table, 1890 .freq_table = tegra210_pll_d2_freq_table,
1899 .set_defaults = tegra210_plld2_set_defaults, 1891 .set_defaults = tegra210_plld2_set_defaults,
1900 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1892 .flags = TEGRA_PLL_USE_LOCK,
1901 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1893 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1902 .set_gain = tegra210_clk_pll_set_gain, 1894 .set_gain = tegra210_clk_pll_set_gain,
1903 .adjust_vco = tegra210_clk_adjust_vco_min, 1895 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
1920 .base_reg = PLLDP_BASE, 1912 .base_reg = PLLDP_BASE,
1921 .misc_reg = PLLDP_MISC, 1913 .misc_reg = PLLDP_MISC,
1922 .lock_mask = PLL_BASE_LOCK, 1914 .lock_mask = PLL_BASE_LOCK,
1923 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1924 .lock_delay = 300, 1915 .lock_delay = 300,
1925 .iddq_reg = PLLDP_BASE, 1916 .iddq_reg = PLLDP_BASE,
1926 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1917 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
1941 .mdiv_default = 1, 1932 .mdiv_default = 1,
1942 .freq_table = pll_dp_freq_table, 1933 .freq_table = pll_dp_freq_table,
1943 .set_defaults = tegra210_plldp_set_defaults, 1934 .set_defaults = tegra210_plldp_set_defaults,
1944 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1935 .flags = TEGRA_PLL_USE_LOCK,
1945 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1936 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1946 .set_gain = tegra210_clk_pll_set_gain, 1937 .set_gain = tegra210_clk_pll_set_gain,
1947 .adjust_vco = tegra210_clk_adjust_vco_min, 1938 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1973 .base_reg = PLLU_BASE, 1964 .base_reg = PLLU_BASE,
1974 .misc_reg = PLLU_MISC0, 1965 .misc_reg = PLLU_MISC0,
1975 .lock_mask = PLL_BASE_LOCK, 1966 .lock_mask = PLL_BASE_LOCK,
1976 .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
1977 .lock_delay = 1000, 1967 .lock_delay = 1000,
1978 .iddq_reg = PLLU_MISC0, 1968 .iddq_reg = PLLU_MISC0,
1979 .iddq_bit_idx = PLLU_IDDQ_BIT, 1969 .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1983 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1973 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1984 .div_nmp = &pllu_nmp, 1974 .div_nmp = &pllu_nmp,
1985 .freq_table = pll_u_freq_table, 1975 .freq_table = pll_u_freq_table,
1986 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1976 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1987 TEGRA_PLL_VCO_OUT,
1988 .set_defaults = tegra210_pllu_set_defaults, 1977 .set_defaults = tegra210_pllu_set_defaults,
1989 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1978 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1990}; 1979};
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
2218 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, 2207 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
2219 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, 2208 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
2220 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, 2209 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
2210 [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
2221}; 2211};
2222 2212
2223static struct tegra_devclk devclks[] __initdata = { 2213static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
2519 2509
2520 /* PLLU_VCO */ 2510 /* PLLU_VCO */
2521 val = readl(clk_base + pll_u_vco_params.base_reg); 2511 val = readl(clk_base + pll_u_vco_params.base_reg);
2522 val &= ~BIT(24); /* disable PLLU_OVERRIDE */ 2512 val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
2523 writel(val, clk_base + pll_u_vco_params.base_reg); 2513 writel(val, clk_base + pll_u_vco_params.base_reg);
2524 2514
2525 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, 2515 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2738 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, 2728 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
2739 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, 2729 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
2740 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, 2730 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
2741 { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
2742 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2743 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, 2731 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
2744 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, 2732 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
2745 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, 2733 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
460 460
461 parent = clk_hw_get_parent(hw); 461 parent = clk_hw_get_parent(hw);
462 462
463 if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { 463 if (clk_hw_get_rate(hw) ==
464 clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
464 WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); 465 WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
465 r = _omap3_noncore_dpll_bypass(clk); 466 r = _omap3_noncore_dpll_bypass(clk);
466 } else { 467 } else {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb2c9b5..3bca438ecd19 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
78 ret = regmap_read(icst->map, icst->vcoreg_off, &val); 78 ret = regmap_read(icst->map, icst->vcoreg_off, &val);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81
82 /* Mask the 18 bits used by the VCO */
83 val &= ~0x7ffff;
81 val |= vco.v | (vco.r << 9) | (vco.s << 16); 84 val |= vco.v | (vco.r << 9) | (vco.s << 16);
82 85
83 /* This magic unlocks the VCO so it can be controlled */ 86 /* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..f93511031177 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -296,6 +296,7 @@ endif
296config QORIQ_CPUFREQ 296config QORIQ_CPUFREQ
297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" 297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM) 298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
299 depends on !CPU_THERMAL || THERMAL
299 select CLK_QORIQ 300 select CLK_QORIQ
300 help 301 help
301 This adds the CPUFreq driver support for Freescale QorIQ SoCs 302 This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0031069b64c9..14b1f9393b05 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
84 SoCs. 84 SoCs.
85 85
86config ARM_MT8173_CPUFREQ 86config ARM_MT8173_CPUFREQ
87 bool "Mediatek MT8173 CPUFreq support" 87 tristate "Mediatek MT8173 CPUFreq support"
88 depends on ARCH_MEDIATEK && REGULATOR 88 depends on ARCH_MEDIATEK && REGULATOR
89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) 89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
90 depends on !CPU_THERMAL || THERMAL=y 90 depends on !CPU_THERMAL || THERMAL
91 select PM_OPP 91 select PM_OPP
92 help 92 help
93 This adds the CPUFreq driver support for Mediatek MT8173 SoC. 93 This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 1efba340456d..2058e6d292ce 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -17,6 +17,7 @@
17#include <linux/cpu_cooling.h> 17#include <linux/cpu_cooling.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/pm_opp.h> 23#include <linux/pm_opp.h>
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
500 clk_set_min_rate(tegra->emc_clock, rate); 500 clk_set_min_rate(tegra->emc_clock, rate);
501 clk_set_rate(tegra->emc_clock, 0); 501 clk_set_rate(tegra->emc_clock, 0);
502 502
503 *freq = rate;
504
503 return 0; 505 return 0;
504} 506}
505 507
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 159 channel_set_bit(dw, MASK.ERROR, dwc->mask);
161 160
162 dwc->initialized = true; 161 dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
588 587
589 spin_unlock_irqrestore(&dwc->lock, flags); 588 spin_unlock_irqrestore(&dwc->lock, flags);
590 } 589 }
590
591 /* Re-enable interrupts */
592 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
591} 593}
592 594
593/* ------------------------------------------------------------------------- */ 595/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
618 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
619 } 621 }
620 622
621 /* 623 /* Re-enable interrupts */
622 * Re-enable interrupts.
623 */
624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
627} 626}
628 627
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1261int dw_dma_cyclic_start(struct dma_chan *chan) 1260int dw_dma_cyclic_start(struct dma_chan *chan)
1262{ 1261{
1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1262 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1263 struct dw_dma *dw = to_dw_dma(chan->device);
1264 unsigned long flags; 1264 unsigned long flags;
1265 1265
1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1269 } 1269 }
1270 1270
1271 spin_lock_irqsave(&dwc->lock, flags); 1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 /* Enable interrupts to perform cyclic transfer */
1274 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1275
1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]);
1277
1273 spin_unlock_irqrestore(&dwc->lock, flags); 1278 spin_unlock_irqrestore(&dwc->lock, flags);
1274 1279
1275 return 0; 1280 return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
108 108
109 /* Haswell */ 109 /* Haswell */
110 { PCI_VDEVICE(INTEL, 0x9c60) }, 110 { PCI_VDEVICE(INTEL, 0x9c60) },
111
112 /* Broadwell */
113 { PCI_VDEVICE(INTEL, 0x9ce0) },
114
111 { } 115 { }
112}; 116};
113MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 117MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
114#define CHMAP_EXIST BIT(24) 114#define CHMAP_EXIST BIT(24)
115 115
116/* CCSTAT register */
117#define EDMA_CCSTAT_ACTV BIT(4)
118
116/* 119/*
117 * Max of 20 segments per channel to conserve PaRAM slots 120 * Max of 20 segments per channel to conserve PaRAM slots
118 * Also note that MAX_NR_SG should be atleast the no.of periods 121 * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1683 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1681} 1684}
1682 1685
1686/*
1687 * This limit exists to avoid a possible infinite loop when waiting for proof
1688 * that a particular transfer is completed. This limit can be hit if there
1689 * are large bursts to/from slow devices or the CPU is never able to catch
1690 * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
1691 * RX-FIFO, as many as 55 loops have been seen.
1692 */
1693#define EDMA_MAX_TR_WAIT_LOOPS 1000
1694
1683static u32 edma_residue(struct edma_desc *edesc) 1695static u32 edma_residue(struct edma_desc *edesc)
1684{ 1696{
1685 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1697 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1698 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1699 struct edma_chan *echan = edesc->echan;
1686 struct edma_pset *pset = edesc->pset; 1700 struct edma_pset *pset = edesc->pset;
1687 dma_addr_t done, pos; 1701 dma_addr_t done, pos;
1688 int i; 1702 int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
1691 * We always read the dst/src position from the first RamPar 1705 * We always read the dst/src position from the first RamPar
1692 * pset. That's the one which is active now. 1706 * pset. That's the one which is active now.
1693 */ 1707 */
1694 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1708 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1709
1710 /*
1711 * "pos" may represent a transfer request that is still being
1712 * processed by the EDMACC or EDMATC. We will busy wait until
1713 * any one of the situations occurs:
1714 * 1. the DMA hardware is idle
1715 * 2. a new transfer request is setup
1716 * 3. we hit the loop limit
1717 */
1718 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1719 /* check if a new transfer request is setup */
1720 if (edma_get_position(echan->ecc,
1721 echan->slot[0], dst) != pos) {
1722 break;
1723 }
1724
1725 if (!--loop_count) {
1726 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1727 "%s: timeout waiting for PaRAM update\n",
1728 __func__);
1729 break;
1730 }
1731
1732 cpu_relax();
1733 }
1695 1734
1696 /* 1735 /*
1697 * Cyclic is simple. Just subtract pset[0].addr from pos. 1736 * Cyclic is simple. Just subtract pset[0].addr from pos.
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
861 return; 861 return;
862 } 862 }
863 863
864 spin_lock_bh(&ioat_chan->cleanup_lock);
865
866 /* handle the no-actives case */
867 if (!ioat_ring_active(ioat_chan)) {
868 spin_lock_bh(&ioat_chan->prep_lock);
869 check_active(ioat_chan);
870 spin_unlock_bh(&ioat_chan->prep_lock);
871 spin_unlock_bh(&ioat_chan->cleanup_lock);
872 return;
873 }
874
864 /* if we haven't made progress and we have already 875 /* if we haven't made progress and we have already
865 * acknowledged a pending completion once, then be more 876 * acknowledged a pending completion once, then be more
866 * forceful with a restart 877 * forceful with a restart
867 */ 878 */
868 spin_lock_bh(&ioat_chan->cleanup_lock);
869 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
870 __cleanup(ioat_chan, phys_complete); 880 __cleanup(ioat_chan, phys_complete);
871 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
882 u32 chanerr;
883
884 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
886 dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
887 status, chanerr);
888 dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
889 ioat_ring_active(ioat_chan));
890
872 spin_lock_bh(&ioat_chan->prep_lock); 891 spin_lock_bh(&ioat_chan->prep_lock);
873 ioat_restart_channel(ioat_chan); 892 ioat_restart_channel(ioat_chan);
874 spin_unlock_bh(&ioat_chan->prep_lock); 893 spin_unlock_bh(&ioat_chan->prep_lock);
875 spin_unlock_bh(&ioat_chan->cleanup_lock); 894 spin_unlock_bh(&ioat_chan->cleanup_lock);
876 return; 895 return;
877 } else { 896 } else
878 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
879 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
880 }
881
882 898
883 if (ioat_ring_active(ioat_chan)) 899 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
885 else {
886 spin_lock_bh(&ioat_chan->prep_lock);
887 check_active(ioat_chan);
888 spin_unlock_bh(&ioat_chan->prep_lock);
889 }
890 spin_unlock_bh(&ioat_chan->cleanup_lock); 900 spin_unlock_bh(&ioat_chan->cleanup_lock);
891} 901}
892 902
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index f2a0310ae771..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
583 (PXA_DCMD_LENGTH & sizeof(u32)); 583 (PXA_DCMD_LENGTH & sizeof(u32));
584 if (flags & DMA_PREP_INTERRUPT) 584 if (flags & DMA_PREP_INTERRUPT)
585 updater->dcmd |= PXA_DCMD_ENDIRQEN; 585 updater->dcmd |= PXA_DCMD_ENDIRQEN;
586 if (sw_desc->cyclic)
587 sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
586} 588}
587 589
588static bool is_desc_completed(struct virt_dma_desc *vd) 590static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
673 dev_dbg(&chan->vc.chan.dev->device, 675 dev_dbg(&chan->vc.chan.dev->device,
674 "%s(): checking txd %p[%x]: completed=%d\n", 676 "%s(): checking txd %p[%x]: completed=%d\n",
675 __func__, vd, vd->tx.cookie, is_desc_completed(vd)); 677 __func__, vd, vd->tx.cookie, is_desc_completed(vd));
678 if (to_pxad_sw_desc(vd)->cyclic) {
679 vchan_cyclic_callback(vd);
680 break;
681 }
676 if (is_desc_completed(vd)) { 682 if (is_desc_completed(vd)) {
677 list_del(&vd->node); 683 list_del(&vd->node);
678 vchan_cookie_complete(vd); 684 vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
1080 return NULL; 1086 return NULL;
1081 1087
1082 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); 1088 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1083 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); 1089 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1084 dev_dbg(&chan->vc.chan.dev->device, 1090 dev_dbg(&chan->vc.chan.dev->device,
1085 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", 1091 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1086 __func__, (unsigned long)buf_addr, len, period_len, dir, flags); 1092 __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cf41440aff91..d9ab0cd1d205 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
196 return 0; 196 return 0;
197} 197}
198 198
199static void gpio_rcar_irq_bus_lock(struct irq_data *d)
200{
201 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
202 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
203
204 pm_runtime_get_sync(&p->pdev->dev);
205}
206
207static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
208{
209 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
210 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
211
212 pm_runtime_put(&p->pdev->dev);
213}
214
215
216static int gpio_rcar_irq_request_resources(struct irq_data *d)
217{
218 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
219 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
220 int error;
221
222 error = pm_runtime_get_sync(&p->pdev->dev);
223 if (error < 0)
224 return error;
225
226 return 0;
227}
228
229static void gpio_rcar_irq_release_resources(struct irq_data *d)
230{
231 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
232 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
233
234 pm_runtime_put(&p->pdev->dev);
235}
236
199static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 237static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
200{ 238{
201 struct gpio_rcar_priv *p = dev_id; 239 struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
450 irq_chip->irq_unmask = gpio_rcar_irq_enable; 488 irq_chip->irq_unmask = gpio_rcar_irq_enable;
451 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 489 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
452 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 490 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
491 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
492 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
493 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
494 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
453 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 495 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
454 496
455 ret = gpiochip_add_data(gpio_chip, p); 497 ret = gpiochip_add_data(gpio_chip, p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
78 /* Don't try to start link training before we 78 /* Don't try to start link training before we
79 * have the dpcd */ 79 * have the dpcd */
80 if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
81 return; 81 return;
82 82
83 /* set it to OFF so that drm_helper_connector_dpms() 83 /* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0a805..8297bc319369 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
72 72
73 struct drm_crtc *crtc = &amdgpuCrtc->base; 73 struct drm_crtc *crtc = &amdgpuCrtc->base;
74 unsigned long flags; 74 unsigned long flags;
75 unsigned i; 75 unsigned i, repcnt = 4;
76 int vpos, hpos, stat, min_udelay; 76 int vpos, hpos, stat, min_udelay = 0;
77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
78 78
79 amdgpu_flip_wait_fence(adev, &work->excl); 79 amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
96 * In practice this won't execute very often unless on very fast 96 * In practice this won't execute very often unless on very fast
97 * machines because the time window for this to happen is very small. 97 * machines because the time window for this to happen is very small.
98 */ 98 */
99 for (;;) { 99 while (amdgpuCrtc->enabled && repcnt--) {
100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
101 * start in hpos, and to the "fudged earlier" vblank start in 101 * start in hpos, and to the "fudged earlier" vblank start in
102 * vpos. 102 * vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
114 /* Sleep at least until estimated real start of hw vblank */ 114 /* Sleep at least until estimated real start of hw vblank */
115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
117 if (min_udelay > vblank->framedur_ns / 2000) {
118 /* Don't wait ridiculously long - something is wrong */
119 repcnt = 0;
120 break;
121 }
117 usleep_range(min_udelay, 2 * min_udelay); 122 usleep_range(min_udelay, 2 * min_udelay);
118 spin_lock_irqsave(&crtc->dev->event_lock, flags); 123 spin_lock_irqsave(&crtc->dev->event_lock, flags);
119 }; 124 };
120 125
126 if (!repcnt)
127 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
128 "framedur %d, linedur %d, stat %d, vpos %d, "
129 "hpos %d\n", work->crtc_id, min_udelay,
130 vblank->framedur_ns / 1000,
131 vblank->linedur_ns / 1000, stat, vpos, hpos);
132
121 /* do the flip (mmio) */ 133 /* do the flip (mmio) */
122 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 134 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
123 /* set the flip status */ 135 /* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f782cd14..d20c2a8929cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
596 break; 596 break;
597 } 597 }
598 ttm_eu_backoff_reservation(&ticket, &list); 598 ttm_eu_backoff_reservation(&ticket, &list);
599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
600 !amdgpu_vm_debug)
600 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 601 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
601 602
602 drm_gem_object_unreference_unlocked(gobj); 603 drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a..95a4a25d8df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
113 struct drm_device *ddev = dev_get_drvdata(dev); 113 struct drm_device *ddev = dev_get_drvdata(dev);
114 struct amdgpu_device *adev = ddev->dev_private; 114 struct amdgpu_device *adev = ddev->dev_private;
115 115
116 if ((adev->flags & AMD_IS_PX) &&
117 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
118 return snprintf(buf, PAGE_SIZE, "off\n");
119
116 if (adev->pp_enabled) { 120 if (adev->pp_enabled) {
117 enum amd_dpm_forced_level level; 121 enum amd_dpm_forced_level level;
118 122
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
140 enum amdgpu_dpm_forced_level level; 144 enum amdgpu_dpm_forced_level level;
141 int ret = 0; 145 int ret = 0;
142 146
147 /* Can't force performance level when the card is off */
148 if ((adev->flags & AMD_IS_PX) &&
149 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
150 return -EINVAL;
151
143 if (strncmp("low", buf, strlen("low")) == 0) { 152 if (strncmp("low", buf, strlen("low")) == 0) {
144 level = AMDGPU_DPM_FORCED_LEVEL_LOW; 153 level = AMDGPU_DPM_FORCED_LEVEL_LOW;
145 } else if (strncmp("high", buf, strlen("high")) == 0) { 154 } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
157 mutex_lock(&adev->pm.mutex); 166 mutex_lock(&adev->pm.mutex);
158 if (adev->pm.dpm.thermal_active) { 167 if (adev->pm.dpm.thermal_active) {
159 count = -EINVAL; 168 count = -EINVAL;
169 mutex_unlock(&adev->pm.mutex);
160 goto fail; 170 goto fail;
161 } 171 }
162 ret = amdgpu_dpm_force_performance_level(adev, level); 172 ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
167 mutex_unlock(&adev->pm.mutex); 177 mutex_unlock(&adev->pm.mutex);
168 } 178 }
169fail: 179fail:
170 mutex_unlock(&adev->pm.mutex);
171
172 return count; 180 return count;
173} 181}
174 182
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
182 char *buf) 190 char *buf)
183{ 191{
184 struct amdgpu_device *adev = dev_get_drvdata(dev); 192 struct amdgpu_device *adev = dev_get_drvdata(dev);
193 struct drm_device *ddev = adev->ddev;
185 int temp; 194 int temp;
186 195
196 /* Can't get temperature when the card is off */
197 if ((adev->flags & AMD_IS_PX) &&
198 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
199 return -EINVAL;
200
187 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) 201 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
188 temp = 0; 202 temp = 0;
189 else 203 else
@@ -634,11 +648,6 @@ force:
634 648
635 /* update display watermarks based on new power state */ 649 /* update display watermarks based on new power state */
636 amdgpu_display_bandwidth_update(adev); 650 amdgpu_display_bandwidth_update(adev);
637 /* update displays */
638 amdgpu_dpm_display_configuration_changed(adev);
639
640 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
641 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
642 651
643 /* wait for the rings to drain */ 652 /* wait for the rings to drain */
644 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 653 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -655,6 +664,12 @@ force:
655 664
656 amdgpu_dpm_post_set_power_state(adev); 665 amdgpu_dpm_post_set_power_state(adev);
657 666
667 /* update displays */
668 amdgpu_dpm_display_configuration_changed(adev);
669
670 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
671 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
672
658 if (adev->pm.funcs->force_performance_level) { 673 if (adev->pm.funcs->force_performance_level) {
659 if (adev->pm.dpm.thermal_active) { 674 if (adev->pm.dpm.thermal_active) {
660 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; 675 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
847 struct drm_info_node *node = (struct drm_info_node *) m->private; 862 struct drm_info_node *node = (struct drm_info_node *) m->private;
848 struct drm_device *dev = node->minor->dev; 863 struct drm_device *dev = node->minor->dev;
849 struct amdgpu_device *adev = dev->dev_private; 864 struct amdgpu_device *adev = dev->dev_private;
865 struct drm_device *ddev = adev->ddev;
850 866
851 if (!adev->pm.dpm_enabled) { 867 if (!adev->pm.dpm_enabled) {
852 seq_printf(m, "dpm not enabled\n"); 868 seq_printf(m, "dpm not enabled\n");
853 return 0; 869 return 0;
854 } 870 }
855 if (adev->pp_enabled) { 871 if ((adev->flags & AMD_IS_PX) &&
872 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
873 seq_printf(m, "PX asic powered off\n");
874 } else if (adev->pp_enabled) {
856 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 875 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
857 } else { 876 } else {
858 mutex_lock(&adev->pm.mutex); 877 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b9d0d55f6b47..3cb6d6c413c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
143 adev->powerplay.pp_handle); 143 adev->powerplay.pp_handle);
144 144
145#ifdef CONFIG_DRM_AMD_POWERPLAY 145#ifdef CONFIG_DRM_AMD_POWERPLAY
146 if (adev->pp_enabled) 146 if (adev->pp_enabled) {
147 amdgpu_pm_sysfs_init(adev); 147 amdgpu_pm_sysfs_init(adev);
148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
149 }
148#endif 150#endif
149 return ret; 151 return ret;
150} 152}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6442a06d6fdc..1cbb16e15307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
712 0, PAGE_SIZE, 712 0, PAGE_SIZE,
713 PCI_DMA_BIDIRECTIONAL); 713 PCI_DMA_BIDIRECTIONAL);
714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { 714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
715 while (--i) { 715 while (i--) {
716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], 716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
718 gtt->ttm.dma_address[i] = 0; 718 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 9056355309d1..e7ef2261ff4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2202 AMD_PG_STATE_GATE); 2202 AMD_PG_STATE_GATE);
2203 2203
2204 cz_enable_vce_dpm(adev, false); 2204 cz_enable_vce_dpm(adev, false);
2205 /* TODO: to figure out why vce can't be poweroff. */ 2205 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
2206 /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
2207 pi->vce_power_gated = true; 2206 pi->vce_power_gated = true;
2208 } else { 2207 } else {
2209 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); 2208 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2226 } 2225 }
2227 } else { /*pi->caps_vce_pg*/ 2226 } else { /*pi->caps_vce_pg*/
2228 cz_update_vce_dpm(adev); 2227 cz_update_vce_dpm(adev);
2229 cz_enable_vce_dpm(adev, true); 2228 cz_enable_vce_dpm(adev, !gate);
2230 } 2229 }
2231
2232 return;
2233} 2230}
2234 2231
2235const struct amd_ip_funcs cz_dpm_ip_funcs = { 2232const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7732059ae30f..06602df707f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3628 unsigned vm_id, uint64_t pd_addr) 3628 unsigned vm_id, uint64_t pd_addr)
3629{ 3629{
3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3631 uint32_t seq = ring->fence_drv.sync_seq;
3632 uint64_t addr = ring->fence_drv.gpu_addr;
3633
3634 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3635 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3636 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3637 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3638 amdgpu_ring_write(ring, addr & 0xfffffffc);
3639 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3640 amdgpu_ring_write(ring, seq);
3641 amdgpu_ring_write(ring, 0xffffffff);
3642 amdgpu_ring_write(ring, 4); /* poll interval */
3643
3631 if (usepfp) { 3644 if (usepfp) {
3632 /* synce CE with ME to prevent CE fetch CEIB before context switch done */ 3645 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
3633 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3646 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8f8ec37ecd88..7086ac17abee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4809 4809
4810 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4810 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4811 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4811 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4812 WAIT_REG_MEM_FUNCTION(3))); /* equal */ 4812 WAIT_REG_MEM_FUNCTION(3) | /* equal */
4813 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
4813 amdgpu_ring_write(ring, addr & 0xfffffffc); 4814 amdgpu_ring_write(ring, addr & 0xfffffffc);
4814 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 4815 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4815 amdgpu_ring_write(ring, seq); 4816 amdgpu_ring_write(ring, seq);
@@ -4995,7 +4996,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4995 case AMDGPU_IRQ_STATE_ENABLE: 4996 case AMDGPU_IRQ_STATE_ENABLE:
4996 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4997 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4997 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4998 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4998 PRIV_REG_INT_ENABLE, 0); 4999 PRIV_REG_INT_ENABLE, 1);
4999 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 5000 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5000 break; 5001 break;
5001 default: 5002 default:
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index aa67244a77ae..589599f66fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
402 402
403 data.requested_ui_label = power_state_convert(ps); 403 data.requested_ui_label = power_state_convert(ps);
404 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); 404 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
405 break;
405 } 406 }
406 break; 407 case AMD_PP_EVENT_COMPLETE_INIT:
408 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
409 break;
407 default: 410 default:
408 break; 411 break;
409 } 412 }
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 83be3cf210e0..6b52c78cb404 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
165}; 165};
166 166
167static const pem_event_action *complete_init_event[] = { 167static const pem_event_action *complete_init_event[] = {
168 unblock_adjust_power_state_tasks,
168 adjust_power_state_tasks, 169 adjust_power_state_tasks,
169 enable_gfx_clock_gating_tasks, 170 enable_gfx_clock_gating_tasks,
170 enable_gfx_voltage_island_power_gating_tasks, 171 enable_gfx_voltage_island_power_gating_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
31static int pem_init(struct pp_eventmgr *eventmgr) 31static int pem_init(struct pp_eventmgr *eventmgr)
32{ 32{
33 int result = 0; 33 int result = 0;
34 struct pem_event_data event_data; 34 struct pem_event_data event_data = { {0} };
35 35
36 /* Initialize PowerPlay feature info */ 36 /* Initialize PowerPlay feature info */
37 pem_init_feature_info(eventmgr); 37 pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
52 52
53static void pem_fini(struct pp_eventmgr *eventmgr) 53static void pem_fini(struct pp_eventmgr *eventmgr)
54{ 54{
55 struct pem_event_data event_data; 55 struct pem_event_data event_data = { {0} };
56 56
57 pem_uninit_featureInfo(eventmgr); 57 pem_uninit_featureInfo(eventmgr);
58 pem_unregister_interrupts(eventmgr); 58 pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ad7700822a1c..ff08ce41bde9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
226 } 226 }
227 } else { 227 } else {
228 cz_dpm_update_vce_dpm(hwmgr); 228 cz_dpm_update_vce_dpm(hwmgr);
229 cz_enable_disable_vce_dpm(hwmgr, true); 229 cz_enable_disable_vce_dpm(hwmgr, !bgate);
230 return 0; 230 return 0;
231 } 231 }
232 232
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9759009d1da3..b1480acbb3c3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
227 } while (ast_read32(ast, 0x10000) != 0x01); 227 } while (ast_read32(ast, 0x10000) != 0x01);
228 data = ast_read32(ast, 0x10004); 228 data = ast_read32(ast, 0x10004);
229 229
230 if (data & 0x400) 230 if (data & 0x40)
231 ast->dram_bus_width = 16; 231 ast->dram_bus_width = 16;
232 else 232 else
233 ast->dram_bus_width = 32; 233 ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193885f1..9a7b44616b55 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
65 */ 65 */
66 state->allow_modeset = true; 66 state->allow_modeset = true;
67 67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc, 68 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL); 69 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs) 70 if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
83 sizeof(*state->plane_states), GFP_KERNEL); 81 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states) 82 if (!state->plane_states)
85 goto fail; 83 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96 84
97 state->dev = dev; 85 state->dev = dev;
98 86
@@ -823,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
823 811
824 index = drm_connector_index(connector); 812 index = drm_connector_index(connector);
825 813
826 /*
827 * Construction of atomic state updates can race with a connector
828 * hot-add which might overflow. In this case flip the table and just
829 * restart the entire ioctl - no one is fast enough to livelock a cpu
830 * with physical hotplug events anyway.
831 *
832 * Note that we only grab the indexes once we have the right lock to
833 * prevent hotplug/unplugging of connectors. So removal is no problem,
834 * at most the array is a bit too large.
835 */
836 if (index >= state->num_connector) { 814 if (index >= state->num_connector) {
837 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); 815 struct drm_connector **c;
838 return ERR_PTR(-EAGAIN); 816 struct drm_connector_state **cs;
817 int alloc = max(index + 1, config->num_connector);
818
819 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
820 if (!c)
821 return ERR_PTR(-ENOMEM);
822
823 state->connectors = c;
824 memset(&state->connectors[state->num_connector], 0,
825 sizeof(*state->connectors) * (alloc - state->num_connector));
826
827 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
828 if (!cs)
829 return ERR_PTR(-ENOMEM);
830
831 state->connector_states = cs;
832 memset(&state->connector_states[state->num_connector], 0,
833 sizeof(*state->connector_states) * (alloc - state->num_connector));
834 state->num_connector = alloc;
839 } 835 }
840 836
841 if (state->connector_states[index]) 837 if (state->connector_states[index])
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7c523060a076..4f2d3e161593 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1493,7 +1493,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
1493{ 1493{
1494 int i; 1494 int i;
1495 1495
1496 for (i = 0; i < dev->mode_config.num_connector; i++) { 1496 for (i = 0; i < state->num_connector; i++) {
1497 struct drm_connector *connector = state->connectors[i]; 1497 struct drm_connector *connector = state->connectors[i];
1498 1498
1499 if (!connector) 1499 if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab29747e..f6191215b2cb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
918 connector->base.properties = &connector->properties; 918 connector->base.properties = &connector->properties;
919 connector->dev = dev; 919 connector->dev = dev;
920 connector->funcs = funcs; 920 connector->funcs = funcs;
921
922 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
923 if (connector->connector_id < 0) {
924 ret = connector->connector_id;
925 goto out_put;
926 }
927
921 connector->connector_type = connector_type; 928 connector->connector_type = connector_type;
922 connector->connector_type_id = 929 connector->connector_type_id =
923 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 930 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
924 if (connector->connector_type_id < 0) { 931 if (connector->connector_type_id < 0) {
925 ret = connector->connector_type_id; 932 ret = connector->connector_type_id;
926 goto out_put; 933 goto out_put_id;
927 } 934 }
928 connector->name = 935 connector->name =
929 kasprintf(GFP_KERNEL, "%s-%d", 936 kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
931 connector->connector_type_id); 938 connector->connector_type_id);
932 if (!connector->name) { 939 if (!connector->name) {
933 ret = -ENOMEM; 940 ret = -ENOMEM;
934 goto out_put; 941 goto out_put_type_id;
935 } 942 }
936 943
937 INIT_LIST_HEAD(&connector->probed_modes); 944 INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
959 } 966 }
960 967
961 connector->debugfs_entry = NULL; 968 connector->debugfs_entry = NULL;
962 969out_put_type_id:
970 if (ret)
971 ida_remove(connector_ida, connector->connector_type_id);
972out_put_id:
973 if (ret)
974 ida_remove(&config->connector_ida, connector->connector_id);
963out_put: 975out_put:
964 if (ret) 976 if (ret)
965 drm_mode_object_put(dev, &connector->base); 977 drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
996 ida_remove(&drm_connector_enum_list[connector->connector_type].ida, 1008 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
997 connector->connector_type_id); 1009 connector->connector_type_id);
998 1010
1011 ida_remove(&dev->mode_config.connector_ida,
1012 connector->connector_id);
1013
999 kfree(connector->display_info.bus_formats); 1014 kfree(connector->display_info.bus_formats);
1000 drm_mode_object_put(dev, &connector->base); 1015 drm_mode_object_put(dev, &connector->base);
1001 kfree(connector->name); 1016 kfree(connector->name);
@@ -1013,32 +1028,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
1013EXPORT_SYMBOL(drm_connector_cleanup); 1028EXPORT_SYMBOL(drm_connector_cleanup);
1014 1029
1015/** 1030/**
1016 * drm_connector_index - find the index of a registered connector
1017 * @connector: connector to find index for
1018 *
1019 * Given a registered connector, return the index of that connector within a DRM
1020 * device's list of connectors.
1021 */
1022unsigned int drm_connector_index(struct drm_connector *connector)
1023{
1024 unsigned int index = 0;
1025 struct drm_connector *tmp;
1026 struct drm_mode_config *config = &connector->dev->mode_config;
1027
1028 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
1029
1030 drm_for_each_connector(tmp, connector->dev) {
1031 if (tmp == connector)
1032 return index;
1033
1034 index++;
1035 }
1036
1037 BUG();
1038}
1039EXPORT_SYMBOL(drm_connector_index);
1040
1041/**
1042 * drm_connector_register - register a connector 1031 * drm_connector_register - register a connector
1043 * @connector: the connector to register 1032 * @connector: the connector to register
1044 * 1033 *
@@ -5789,6 +5778,7 @@ void drm_mode_config_init(struct drm_device *dev)
5789 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5778 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5790 idr_init(&dev->mode_config.crtc_idr); 5779 idr_init(&dev->mode_config.crtc_idr);
5791 idr_init(&dev->mode_config.tile_idr); 5780 idr_init(&dev->mode_config.tile_idr);
5781 ida_init(&dev->mode_config.connector_ida);
5792 5782
5793 drm_modeset_lock_all(dev); 5783 drm_modeset_lock_all(dev);
5794 drm_mode_create_standard_properties(dev); 5784 drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5869 crtc->funcs->destroy(crtc); 5859 crtc->funcs->destroy(crtc);
5870 } 5860 }
5871 5861
5862 ida_destroy(&dev->mode_config.connector_ida);
5872 idr_destroy(&dev->mode_config.tile_idr); 5863 idr_destroy(&dev->mode_config.tile_idr);
5873 idr_destroy(&dev->mode_config.crtc_idr); 5864 idr_destroy(&dev->mode_config.crtc_idr);
5874 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5865 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8ae13de272c4..27fbd79d0daf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1159,11 +1159,13 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1159 drm_dp_put_port(port); 1159 drm_dp_put_port(port);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 drm_mode_connector_set_tile_property(port->connector); 1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 1164 drm_mode_connector_set_tile_property(port->connector);
1165 }
1165 (*mstb->mgr->cbs->register_connector)(port->connector); 1166 (*mstb->mgr->cbs->register_connector)(port->connector);
1166 } 1167 }
1168
1167out: 1169out:
1168 /* put reference to this port */ 1170 /* put reference to this port */
1169 drm_dp_put_port(port); 1171 drm_dp_put_port(port);
@@ -1188,8 +1190,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1188 port->ddps = conn_stat->displayport_device_plug_status; 1190 port->ddps = conn_stat->displayport_device_plug_status;
1189 1191
1190 if (old_ddps != port->ddps) { 1192 if (old_ddps != port->ddps) {
1191 dowork = true;
1192 if (port->ddps) { 1193 if (port->ddps) {
1194 dowork = true;
1193 } else { 1195 } else {
1194 port->available_pbn = 0; 1196 port->available_pbn = 0;
1195 } 1197 }
@@ -1294,13 +1296,8 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1294 if (port->input) 1296 if (port->input)
1295 continue; 1297 continue;
1296 1298
1297 if (!port->ddps) { 1299 if (!port->ddps)
1298 if (port->cached_edid) {
1299 kfree(port->cached_edid);
1300 port->cached_edid = NULL;
1301 }
1302 continue; 1300 continue;
1303 }
1304 1301
1305 if (!port->available_pbn) 1302 if (!port->available_pbn)
1306 drm_dp_send_enum_path_resources(mgr, mstb, port); 1303 drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1311,12 +1308,6 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1311 drm_dp_check_and_send_link_address(mgr, mstb_child); 1308 drm_dp_check_and_send_link_address(mgr, mstb_child);
1312 drm_dp_put_mst_branch_device(mstb_child); 1309 drm_dp_put_mst_branch_device(mstb_child);
1313 } 1310 }
1314 } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
1315 port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
1316 if (!port->cached_edid) {
1317 port->cached_edid =
1318 drm_get_edid(port->connector, &port->aux.ddc);
1319 }
1320 } 1311 }
1321 } 1312 }
1322} 1313}
@@ -1336,8 +1327,6 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
1336 drm_dp_check_and_send_link_address(mgr, mstb); 1327 drm_dp_check_and_send_link_address(mgr, mstb);
1337 drm_dp_put_mst_branch_device(mstb); 1328 drm_dp_put_mst_branch_device(mstb);
1338 } 1329 }
1339
1340 (*mgr->cbs->hotplug)(mgr);
1341} 1330}
1342 1331
1343static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1332static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1597,6 +1586,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1597 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1598 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1599 } 1588 }
1589 (*mgr->cbs->hotplug)(mgr);
1600 } 1590 }
1601 } else { 1591 } else {
1602 mstb->link_address_sent = false; 1592 mstb->link_address_sent = false;
@@ -2293,6 +2283,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2293 drm_dp_update_port(mstb, &msg.u.conn_stat); 2283 drm_dp_update_port(mstb, &msg.u.conn_stat);
2294 2284
2295 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2285 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2286 (*mgr->cbs->hotplug)(mgr);
2287
2296 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2288 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2297 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2289 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2298 if (!mstb) 2290 if (!mstb)
@@ -2379,6 +2371,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
2379 2371
2380 case DP_PEER_DEVICE_SST_SINK: 2372 case DP_PEER_DEVICE_SST_SINK:
2381 status = connector_status_connected; 2373 status = connector_status_connected;
2374 /* for logical ports - cache the EDID */
2375 if (port->port_num >= 8 && !port->cached_edid) {
2376 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2377 }
2382 break; 2378 break;
2383 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2379 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2384 if (port->ldps) 2380 if (port->ldps)
@@ -2433,7 +2429,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2433 2429
2434 if (port->cached_edid) 2430 if (port->cached_edid)
2435 edid = drm_edid_duplicate(port->cached_edid); 2431 edid = drm_edid_duplicate(port->cached_edid);
2436 2432 else {
2433 edid = drm_get_edid(connector, &port->aux.ddc);
2434 drm_mode_connector_set_tile_property(connector);
2435 }
2437 port->has_audio = drm_detect_monitor_audio(edid); 2436 port->has_audio = drm_detect_monitor_audio(edid);
2438 drm_dp_put_port(port); 2437 drm_dp_put_port(port);
2439 return edid; 2438 return edid;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4efa651b..1fe14579e8c9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; 224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
225 } 225 }
226 226
227 /*
228 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
229 * interval? If so then vblank irqs keep running and it will likely
230 * happen that the hardware vblank counter is not trustworthy as it
231 * might reset at some point in that interval and vblank timestamps
232 * are not trustworthy either in that interval. Iow. this can result
233 * in a bogus diff >> 1 which must be avoided as it would cause
234 * random large forward jumps of the software vblank counter.
235 */
236 if (diff > 1 && (vblank->inmodeset & 0x2)) {
237 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
238 " due to pre-modeset.\n", pipe, diff);
239 diff = 1;
240 }
241
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
227 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 285 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
228 " current=%u, diff=%u, hw=%u hw_last=%u\n", 286 " current=%u, diff=%u, hw=%u hw_last=%u\n",
229 pipe, vblank->count, diff, cur_vblank, vblank->last); 287 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1316 spin_lock_irqsave(&dev->event_lock, irqflags); 1374 spin_lock_irqsave(&dev->event_lock, irqflags);
1317 1375
1318 spin_lock(&dev->vbl_lock); 1376 spin_lock(&dev->vbl_lock);
1319 vblank_disable_and_save(dev, pipe); 1377 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1378 pipe, vblank->enabled, vblank->inmodeset);
1379
1380 /* Avoid redundant vblank disables without previous drm_vblank_on(). */
1381 if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
1382 vblank_disable_and_save(dev, pipe);
1383
1320 wake_up(&vblank->queue); 1384 wake_up(&vblank->queue);
1321 1385
1322 /* 1386 /*
@@ -1418,6 +1482,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1418 return; 1482 return;
1419 1483
1420 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1484 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1485 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1486 pipe, vblank->enabled, vblank->inmodeset);
1487
1421 /* Drop our private "prevent drm_vblank_get" refcount */ 1488 /* Drop our private "prevent drm_vblank_get" refcount */
1422 if (vblank->inmodeset) { 1489 if (vblank->inmodeset) {
1423 atomic_dec(&vblank->refcount); 1490 atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1430 * re-enable interrupts if there are users left, or the 1497 * re-enable interrupts if there are users left, or the
1431 * user wishes vblank interrupts to be enabled all the time. 1498 * user wishes vblank interrupts to be enabled all the time.
1432 */ 1499 */
1433 if (atomic_read(&vblank->refcount) != 0 || 1500 if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
1434 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1435 WARN_ON(drm_vblank_enable(dev, pipe)); 1501 WARN_ON(drm_vblank_enable(dev, pipe));
1436 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1502 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1437} 1503}
@@ -1526,6 +1592,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1526 if (vblank->inmodeset) { 1592 if (vblank->inmodeset) {
1527 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1593 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1528 dev->vblank_disable_allowed = true; 1594 dev->vblank_disable_allowed = true;
1595 drm_reset_vblank_timestamp(dev, pipe);
1529 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1596 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1530 1597
1531 if (vblank->inmodeset & 0x2) 1598 if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca941388..f17d39279596 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 5 select DRM_KMS_FB_HELPER
6 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21130c7..162ab93e99cb 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
93 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 93 if (test_bit(BIT_SUSPENDED, &ctx->flags))
94 return -EPERM; 94 return -EPERM;
95 95
96 if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { 96 if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
97 val = VIDINTCON0_INTEN; 97 val = VIDINTCON0_INTEN;
98 if (ctx->out_type == IFTYPE_I80) 98 if (ctx->out_type == IFTYPE_I80)
99 val |= VIDINTCON0_FRAMEDONE; 99 val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
402 decon_enable_vblank(ctx->crtc); 402 decon_enable_vblank(ctx->crtc);
403 403
404 decon_commit(ctx->crtc); 404 decon_commit(ctx->crtc);
405
406 set_bit(BIT_SUSPENDED, &ctx->flags);
407} 405}
408 406
409static void decon_disable(struct exynos_drm_crtc *crtc) 407static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@ out:
582static int exynos5433_decon_suspend(struct device *dev) 580static int exynos5433_decon_suspend(struct device *dev)
583{ 581{
584 struct decon_context *ctx = dev_get_drvdata(dev); 582 struct decon_context *ctx = dev_get_drvdata(dev);
585 int i; 583 int i = ARRAY_SIZE(decon_clks_name);
586 584
587 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) 585 while (--i >= 0)
588 clk_disable_unprepare(ctx->clks[i]); 586 clk_disable_unprepare(ctx->clks[i]);
589 587
590 return 0; 588 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e977a81af2e6..26e81d191f56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1782 1782
1783 bridge = of_drm_find_bridge(dsi->bridge_node); 1783 bridge = of_drm_find_bridge(dsi->bridge_node);
1784 if (bridge) { 1784 if (bridge) {
1785 encoder->bridge = bridge;
1785 drm_bridge_attach(drm_dev, bridge); 1786 drm_bridge_attach(drm_dev, bridge);
1786 } 1787 }
1787 1788
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118baa8e3e..8baabd813ff5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
50 if (vm_size > exynos_gem->size) 50 if (vm_size > exynos_gem->size)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages, 53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
54 exynos_gem->dma_addr, exynos_gem->size, 54 exynos_gem->dma_addr, exynos_gem->size,
55 &exynos_gem->dma_attrs); 55 &exynos_gem->dma_attrs);
56 if (ret < 0) { 56 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824f3c98..8a4f4a0211d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@ static int fimc_probe(struct platform_device *pdev)
1723 goto err_put_clk; 1723 goto err_put_clk;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 spin_lock_init(&ctx->lock); 1728 spin_lock_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb238a6..8dfe6e113a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1166 goto err_free_event; 1166 goto err_free_event;
1167 } 1167 }
1168 1168
1169 cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; 1169 cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1170 1170
1171 if (copy_from_user(cmdlist->data + cmdlist->last, 1171 if (copy_from_user(cmdlist->data + cmdlist->last,
1172 (void __user *)cmd, 1172 (void __user *)cmd,
@@ -1184,7 +1184,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1184 if (req->cmd_buf_nr) { 1184 if (req->cmd_buf_nr) {
1185 struct drm_exynos_g2d_cmd *cmd_buf; 1185 struct drm_exynos_g2d_cmd *cmd_buf;
1186 1186
1187 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; 1187 cmd_buf = (struct drm_exynos_g2d_cmd *)
1188 (unsigned long)req->cmd_buf;
1188 1189
1189 if (copy_from_user(cmdlist->data + cmdlist->last, 1190 if (copy_from_user(cmdlist->data + cmdlist->last,
1190 (void __user *)cmd_buf, 1191 (void __user *)cmd_buf,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5e3db4..26b5e4bd55b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
@@ -335,7 +335,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
335 if (vm_size > exynos_gem->size) 335 if (vm_size > exynos_gem->size)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, 338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
339 exynos_gem->dma_addr, exynos_gem->size, 339 exynos_gem->dma_addr, exynos_gem->size,
340 &exynos_gem->dma_attrs); 340 &exynos_gem->dma_attrs);
341 if (ret < 0) { 341 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23cfa11..5d20da8f957e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d24236e745..95eeb9116f10 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
392 property->prop_id, property->cmd, (int)ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
395 c_node->property = *property; 395 c_node->property = *property;
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 521 DRM_DEBUG_KMS("node[%p]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@ static struct drm_exynos_ipp_mem_node
582 582
583 buf_info->handles[i] = qbuf->handle[i]; 583 buf_info->handles[i] = qbuf->handle[i];
584 buf_info->base[i] = *addr; 584 buf_info->base[i] = *addr;
585 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, 585 DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
586 buf_info->base[i], buf_info->handles[i]); 586 &buf_info->base[i], buf_info->handles[i]);
587 } 587 }
588 } 588 }
589 589
@@ -664,7 +664,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
664 664
665 mutex_lock(&c_node->event_lock); 665 mutex_lock(&c_node->event_lock);
666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
667 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 667 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
668 668
669 /* 669 /*
670 * qbuf == NULL condition means all event deletion. 670 * qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@ static struct drm_exynos_ipp_mem_node
755 755
756 /* find memory node from memory list */ 756 /* find memory node from memory list */
757 list_for_each_entry(m_node, head, list) { 757 list_for_each_entry(m_node, head, list) {
758 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 758 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
759 759
760 /* compare buffer id */ 760 /* compare buffer id */
761 if (m_node->buf_id == qbuf->buf_id) 761 if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
772 struct exynos_drm_ipp_ops *ops = NULL; 772 struct exynos_drm_ipp_ops *ops = NULL;
773 int ret = 0; 773 int ret = 0;
774 774
775 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 775 DRM_DEBUG_KMS("node[%p]\n", m_node);
776 776
777 if (!m_node) { 777 if (!m_node) {
778 DRM_ERROR("invalid queue node.\n"); 778 DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1237 m_node = list_first_entry(head, 1237 m_node = list_first_entry(head,
1238 struct drm_exynos_ipp_mem_node, list); 1238 struct drm_exynos_ipp_mem_node, list);
1239 1239
1240 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1240 DRM_DEBUG_KMS("m_node[%p]\n", m_node);
1241 1241
1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1243 if (ret) { 1243 if (ret) {
@@ -1610,8 +1610,8 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1610 } 1610 }
1611 ippdrv->prop_list.ipp_id = ret; 1611 ippdrv->prop_list.ipp_id = ret;
1612 1612
1613 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1613 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
1614 count++, (int)ippdrv, ret); 1614 count++, ippdrv, ret);
1615 1615
1616 /* store parent device for node */ 1616 /* store parent device for node */
1617 ippdrv->parent_dev = dev; 1617 ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1668 1668
1669 file_priv->ipp_dev = dev; 1669 file_priv->ipp_dev = dev;
1670 1670
1671 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); 1671 DRM_DEBUG_KMS("done priv[%p]\n", dev);
1672 1672
1673 return 0; 1673 return 0;
1674} 1674}
@@ -1685,8 +1685,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1685 mutex_lock(&ippdrv->cmd_lock); 1685 mutex_lock(&ippdrv->cmd_lock);
1686 list_for_each_entry_safe(c_node, tc_node, 1686 list_for_each_entry_safe(c_node, tc_node,
1687 &ippdrv->cmd_list, list) { 1687 &ippdrv->cmd_list, list) {
1688 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1688 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
1689 count++, (int)ippdrv); 1689 count++, ippdrv);
1690 1690
1691 if (c_node->filp == file) { 1691 if (c_node->filp == file) {
1692 /* 1692 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36aec5a..9869d70e9e54 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_graph.h> 19#include <linux/of_graph.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h>
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <linux/mfd/syscon.h> 23#include <linux/mfd/syscon.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
@@ -306,9 +307,9 @@ exit:
306 return ret; 307 return ret;
307} 308}
308 309
309void mic_disable(struct drm_bridge *bridge) { } 310static void mic_disable(struct drm_bridge *bridge) { }
310 311
311void mic_post_disable(struct drm_bridge *bridge) 312static void mic_post_disable(struct drm_bridge *bridge)
312{ 313{
313 struct exynos_mic *mic = bridge->driver_private; 314 struct exynos_mic *mic = bridge->driver_private;
314 int i; 315 int i;
@@ -328,7 +329,7 @@ already_disabled:
328 mutex_unlock(&mic_mutex); 329 mutex_unlock(&mic_mutex);
329} 330}
330 331
331void mic_pre_enable(struct drm_bridge *bridge) 332static void mic_pre_enable(struct drm_bridge *bridge)
332{ 333{
333 struct exynos_mic *mic = bridge->driver_private; 334 struct exynos_mic *mic = bridge->driver_private;
334 int ret, i; 335 int ret, i;
@@ -371,11 +372,35 @@ already_enabled:
371 mutex_unlock(&mic_mutex); 372 mutex_unlock(&mic_mutex);
372} 373}
373 374
374void mic_enable(struct drm_bridge *bridge) { } 375static void mic_enable(struct drm_bridge *bridge) { }
375 376
376void mic_destroy(struct drm_bridge *bridge) 377static const struct drm_bridge_funcs mic_bridge_funcs = {
378 .disable = mic_disable,
379 .post_disable = mic_post_disable,
380 .pre_enable = mic_pre_enable,
381 .enable = mic_enable,
382};
383
384static int exynos_mic_bind(struct device *dev, struct device *master,
385 void *data)
377{ 386{
378 struct exynos_mic *mic = bridge->driver_private; 387 struct exynos_mic *mic = dev_get_drvdata(dev);
388 int ret;
389
390 mic->bridge.funcs = &mic_bridge_funcs;
391 mic->bridge.of_node = dev->of_node;
392 mic->bridge.driver_private = mic;
393 ret = drm_bridge_add(&mic->bridge);
394 if (ret)
395 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
396
397 return ret;
398}
399
400static void exynos_mic_unbind(struct device *dev, struct device *master,
401 void *data)
402{
403 struct exynos_mic *mic = dev_get_drvdata(dev);
379 int i; 404 int i;
380 405
381 mutex_lock(&mic_mutex); 406 mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@ void mic_destroy(struct drm_bridge *bridge)
387 412
388already_disabled: 413already_disabled:
389 mutex_unlock(&mic_mutex); 414 mutex_unlock(&mic_mutex);
415
416 drm_bridge_remove(&mic->bridge);
390} 417}
391 418
392static const struct drm_bridge_funcs mic_bridge_funcs = { 419static const struct component_ops exynos_mic_component_ops = {
393 .disable = mic_disable, 420 .bind = exynos_mic_bind,
394 .post_disable = mic_post_disable, 421 .unbind = exynos_mic_unbind,
395 .pre_enable = mic_pre_enable,
396 .enable = mic_enable,
397}; 422};
398 423
399int exynos_mic_probe(struct platform_device *pdev) 424static int exynos_mic_probe(struct platform_device *pdev)
400{ 425{
401 struct device *dev = &pdev->dev; 426 struct device *dev = &pdev->dev;
402 struct exynos_mic *mic; 427 struct exynos_mic *mic;
@@ -435,17 +460,8 @@ int exynos_mic_probe(struct platform_device *pdev)
435 goto err; 460 goto err;
436 } 461 }
437 462
438 mic->bridge.funcs = &mic_bridge_funcs;
439 mic->bridge.of_node = dev->of_node;
440 mic->bridge.driver_private = mic;
441 ret = drm_bridge_add(&mic->bridge);
442 if (ret) {
443 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
444 goto err;
445 }
446
447 for (i = 0; i < NUM_CLKS; i++) { 463 for (i = 0; i < NUM_CLKS; i++) {
448 mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]); 464 mic->clks[i] = devm_clk_get(dev, clk_names[i]);
449 if (IS_ERR(mic->clks[i])) { 465 if (IS_ERR(mic->clks[i])) {
450 DRM_ERROR("mic: Failed to get clock (%s)\n", 466 DRM_ERROR("mic: Failed to get clock (%s)\n",
451 clk_names[i]); 467 clk_names[i]);
@@ -454,7 +470,10 @@ int exynos_mic_probe(struct platform_device *pdev)
454 } 470 }
455 } 471 }
456 472
473 platform_set_drvdata(pdev, mic);
474
457 DRM_DEBUG_KMS("MIC has been probed\n"); 475 DRM_DEBUG_KMS("MIC has been probed\n");
476 return component_add(dev, &exynos_mic_component_ops);
458 477
459err: 478err:
460 return ret; 479 return ret;
@@ -462,14 +481,7 @@ err:
462 481
463static int exynos_mic_remove(struct platform_device *pdev) 482static int exynos_mic_remove(struct platform_device *pdev)
464{ 483{
465 struct exynos_mic *mic = platform_get_drvdata(pdev); 484 component_del(&pdev->dev, &exynos_mic_component_ops);
466 int i;
467
468 drm_bridge_remove(&mic->bridge);
469
470 for (i = NUM_CLKS - 1; i > -1; i--)
471 clk_put(mic->clks[i]);
472
473 return 0; 485 return 0;
474} 486}
475 487
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f7826d30..ce59f4443394 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@ static int rotator_probe(struct platform_device *pdev)
754 goto err_ippdrv_register; 754 goto err_ippdrv_register;
755 } 755 }
756 756
757 DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); 757 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
758 758
759 platform_set_drvdata(pdev, rot); 759 platform_set_drvdata(pdev, rot);
760 760
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5fa51d..b605bd7395ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
223 } 223 }
224} 224}
225 225
226static int vidi_show_connection(struct device *dev, 226static ssize_t vidi_show_connection(struct device *dev,
227 struct device_attribute *attr, char *buf) 227 struct device_attribute *attr, char *buf)
228{ 228{
229 struct vidi_context *ctx = dev_get_drvdata(dev); 229 struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@ static int vidi_show_connection(struct device *dev,
238 return rc; 238 return rc;
239} 239}
240 240
241static int vidi_store_connection(struct device *dev, 241static ssize_t vidi_store_connection(struct device *dev,
242 struct device_attribute *attr, 242 struct device_attribute *attr,
243 const char *buf, size_t len) 243 const char *buf, size_t len)
244{ 244{
@@ -294,7 +294,9 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
294 } 294 }
295 295
296 if (vidi->connection) { 296 if (vidi->connection) {
297 struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; 297 struct edid *raw_edid;
298
299 raw_edid = (struct edid *)(unsigned long)vidi->edid;
298 if (!drm_edid_is_valid(raw_edid)) { 300 if (!drm_edid_is_valid(raw_edid)) {
299 DRM_DEBUG_KMS("edid data is invalid.\n"); 301 DRM_DEBUG_KMS("edid data is invalid.\n");
300 return -EINVAL; 302 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..cf39ed3133d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
825 } 825 }
826 826
827 for_each_pipe(dev_priv, pipe) { 827 for_each_pipe(dev_priv, pipe) {
828 if (!intel_display_power_is_enabled(dev_priv, 828 enum intel_display_power_domain power_domain;
829 POWER_DOMAIN_PIPE(pipe))) { 829
830 power_domain = POWER_DOMAIN_PIPE(pipe);
831 if (!intel_display_power_get_if_enabled(dev_priv,
832 power_domain)) {
830 seq_printf(m, "Pipe %c power disabled\n", 833 seq_printf(m, "Pipe %c power disabled\n",
831 pipe_name(pipe)); 834 pipe_name(pipe));
832 continue; 835 continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
840 seq_printf(m, "Pipe %c IER:\t%08x\n", 843 seq_printf(m, "Pipe %c IER:\t%08x\n",
841 pipe_name(pipe), 844 pipe_name(pipe),
842 I915_READ(GEN8_DE_PIPE_IER(pipe))); 845 I915_READ(GEN8_DE_PIPE_IER(pipe)));
846
847 intel_display_power_put(dev_priv, power_domain);
843 } 848 }
844 849
845 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 850 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3985 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3990 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3986 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3991 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3987 pipe)); 3992 pipe));
3993 enum intel_display_power_domain power_domain;
3988 u32 val = 0; /* shut up gcc */ 3994 u32 val = 0; /* shut up gcc */
3989 int ret; 3995 int ret;
3990 3996
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3995 if (pipe_crc->source && source) 4001 if (pipe_crc->source && source)
3996 return -EINVAL; 4002 return -EINVAL;
3997 4003
3998 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 4004 power_domain = POWER_DOMAIN_PIPE(pipe);
4005 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
3999 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4006 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4000 return -EIO; 4007 return -EIO;
4001 } 4008 }
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4012 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4019 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4013 4020
4014 if (ret != 0) 4021 if (ret != 0)
4015 return ret; 4022 goto out;
4016 4023
4017 /* none -> real source transition */ 4024 /* none -> real source transition */
4018 if (source) { 4025 if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4024 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4031 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4025 sizeof(pipe_crc->entries[0]), 4032 sizeof(pipe_crc->entries[0]),
4026 GFP_KERNEL); 4033 GFP_KERNEL);
4027 if (!entries) 4034 if (!entries) {
4028 return -ENOMEM; 4035 ret = -ENOMEM;
4036 goto out;
4037 }
4029 4038
4030 /* 4039 /*
4031 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4040 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4081 hsw_enable_ips(crtc); 4090 hsw_enable_ips(crtc);
4082 } 4091 }
4083 4092
4084 return 0; 4093 ret = 0;
4094
4095out:
4096 intel_display_power_put(dev_priv, power_domain);
4097
4098 return ret;
4085} 4099}
4086 4100
4087/* 4101/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e7cd311e9fbb..b0847b915545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@ struct intel_csr {
751 uint32_t mmio_count; 751 uint32_t mmio_count;
752 i915_reg_t mmioaddr[8]; 752 i915_reg_t mmioaddr[8];
753 uint32_t mmiodata[8]; 753 uint32_t mmiodata[8];
754 uint32_t dc_state;
754}; 755};
755 756
756#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 757#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b9a564b76528..4897728713f6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@ enum skl_disp_power_wells {
3287 3287
3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) 3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3289/* 3289/*
3290 * HDMI/DP bits are gen4+ 3290 * HDMI/DP bits are g4x+
3291 * 3291 *
3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
3293 * Please check the detailed lore in the commit message for for experimental 3293 * Please check the detailed lore in the commit message for for experimental
3294 * evidence. 3294 * evidence.
3295 */ 3295 */
3296#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) 3296/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
3297#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
3298#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
3299#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
3300/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
3301#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
3297#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 3302#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
3298#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 3303#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
3299/* VLV DP/HDMI bits again match Bspec */
3300#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
3301#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
3302#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
3303#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 3304#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
3304#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) 3305#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
3305#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) 3306#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1af036..a7b4a524fadd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
74 bool ret;
74 75
75 power_domain = intel_display_port_power_domain(encoder); 76 power_domain = intel_display_port_power_domain(encoder);
76 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 77 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
77 return false; 78 return false;
78 79
80 ret = false;
81
79 tmp = I915_READ(crt->adpa_reg); 82 tmp = I915_READ(crt->adpa_reg);
80 83
81 if (!(tmp & ADPA_DAC_ENABLE)) 84 if (!(tmp & ADPA_DAC_ENABLE))
82 return false; 85 goto out;
83 86
84 if (HAS_PCH_CPT(dev)) 87 if (HAS_PCH_CPT(dev))
85 *pipe = PORT_TO_PIPE_CPT(tmp); 88 *pipe = PORT_TO_PIPE_CPT(tmp);
86 else 89 else
87 *pipe = PORT_TO_PIPE(tmp); 90 *pipe = PORT_TO_PIPE(tmp);
88 91
89 return true; 92 ret = true;
93out:
94 intel_display_power_put(dev_priv, power_domain);
95
96 return ret;
90} 97}
91 98
92static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..647d85e77c2f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
240 I915_WRITE(dev_priv->csr.mmioaddr[i], 240 I915_WRITE(dev_priv->csr.mmioaddr[i],
241 dev_priv->csr.mmiodata[i]); 241 dev_priv->csr.mmiodata[i]);
242 } 242 }
243
244 dev_priv->csr.dc_state = 0;
243} 245}
244 246
245static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, 247static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 54a165b9c92d..0f3df2c39f7c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1969,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1969 enum transcoder cpu_transcoder; 1969 enum transcoder cpu_transcoder;
1970 enum intel_display_power_domain power_domain; 1970 enum intel_display_power_domain power_domain;
1971 uint32_t tmp; 1971 uint32_t tmp;
1972 bool ret;
1972 1973
1973 power_domain = intel_display_port_power_domain(intel_encoder); 1974 power_domain = intel_display_port_power_domain(intel_encoder);
1974 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 1975 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1975 return false; 1976 return false;
1976 1977
1977 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1978 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
1978 return false; 1979 ret = false;
1980 goto out;
1981 }
1979 1982
1980 if (port == PORT_A) 1983 if (port == PORT_A)
1981 cpu_transcoder = TRANSCODER_EDP; 1984 cpu_transcoder = TRANSCODER_EDP;
@@ -1987,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1987 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { 1990 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
1988 case TRANS_DDI_MODE_SELECT_HDMI: 1991 case TRANS_DDI_MODE_SELECT_HDMI:
1989 case TRANS_DDI_MODE_SELECT_DVI: 1992 case TRANS_DDI_MODE_SELECT_DVI:
1990 return (type == DRM_MODE_CONNECTOR_HDMIA); 1993 ret = type == DRM_MODE_CONNECTOR_HDMIA;
1994 break;
1991 1995
1992 case TRANS_DDI_MODE_SELECT_DP_SST: 1996 case TRANS_DDI_MODE_SELECT_DP_SST:
1993 if (type == DRM_MODE_CONNECTOR_eDP) 1997 ret = type == DRM_MODE_CONNECTOR_eDP ||
1994 return true; 1998 type == DRM_MODE_CONNECTOR_DisplayPort;
1995 return (type == DRM_MODE_CONNECTOR_DisplayPort); 1999 break;
2000
1996 case TRANS_DDI_MODE_SELECT_DP_MST: 2001 case TRANS_DDI_MODE_SELECT_DP_MST:
1997 /* if the transcoder is in MST state then 2002 /* if the transcoder is in MST state then
1998 * connector isn't connected */ 2003 * connector isn't connected */
1999 return false; 2004 ret = false;
2005 break;
2000 2006
2001 case TRANS_DDI_MODE_SELECT_FDI: 2007 case TRANS_DDI_MODE_SELECT_FDI:
2002 return (type == DRM_MODE_CONNECTOR_VGA); 2008 ret = type == DRM_MODE_CONNECTOR_VGA;
2009 break;
2003 2010
2004 default: 2011 default:
2005 return false; 2012 ret = false;
2013 break;
2006 } 2014 }
2015
2016out:
2017 intel_display_power_put(dev_priv, power_domain);
2018
2019 return ret;
2007} 2020}
2008 2021
2009bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 2022bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2015,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2015 enum intel_display_power_domain power_domain; 2028 enum intel_display_power_domain power_domain;
2016 u32 tmp; 2029 u32 tmp;
2017 int i; 2030 int i;
2031 bool ret;
2018 2032
2019 power_domain = intel_display_port_power_domain(encoder); 2033 power_domain = intel_display_port_power_domain(encoder);
2020 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2034 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2021 return false; 2035 return false;
2022 2036
2037 ret = false;
2038
2023 tmp = I915_READ(DDI_BUF_CTL(port)); 2039 tmp = I915_READ(DDI_BUF_CTL(port));
2024 2040
2025 if (!(tmp & DDI_BUF_CTL_ENABLE)) 2041 if (!(tmp & DDI_BUF_CTL_ENABLE))
2026 return false; 2042 goto out;
2027 2043
2028 if (port == PORT_A) { 2044 if (port == PORT_A) {
2029 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 2045 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2041,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2041 break; 2057 break;
2042 } 2058 }
2043 2059
2044 return true; 2060 ret = true;
2045 } else {
2046 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2047 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2048 2061
2049 if ((tmp & TRANS_DDI_PORT_MASK) 2062 goto out;
2050 == TRANS_DDI_SELECT_PORT(port)) { 2063 }
2051 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
2052 return false;
2053 2064
2054 *pipe = i; 2065 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2055 return true; 2066 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2056 } 2067
2068 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
2069 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
2070 TRANS_DDI_MODE_SELECT_DP_MST)
2071 goto out;
2072
2073 *pipe = i;
2074 ret = true;
2075
2076 goto out;
2057 } 2077 }
2058 } 2078 }
2059 2079
2060 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 2080 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
2061 2081
2062 return false; 2082out:
2083 intel_display_power_put(dev_priv, power_domain);
2084
2085 return ret;
2063} 2086}
2064 2087
2065void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2088void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2508,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
2508{ 2531{
2509 uint32_t val; 2532 uint32_t val;
2510 2533
2511 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2534 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2512 return false; 2535 return false;
2513 2536
2514 val = I915_READ(WRPLL_CTL(pll->id)); 2537 val = I915_READ(WRPLL_CTL(pll->id));
2515 hw_state->wrpll = val; 2538 hw_state->wrpll = val;
2516 2539
2540 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2541
2517 return val & WRPLL_PLL_ENABLE; 2542 return val & WRPLL_PLL_ENABLE;
2518} 2543}
2519 2544
@@ -2523,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
2523{ 2548{
2524 uint32_t val; 2549 uint32_t val;
2525 2550
2526 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2551 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2527 return false; 2552 return false;
2528 2553
2529 val = I915_READ(SPLL_CTL); 2554 val = I915_READ(SPLL_CTL);
2530 hw_state->spll = val; 2555 hw_state->spll = val;
2531 2556
2557 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2558
2532 return val & SPLL_PLL_ENABLE; 2559 return val & SPLL_PLL_ENABLE;
2533} 2560}
2534 2561
@@ -2645,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2645 uint32_t val; 2672 uint32_t val;
2646 unsigned int dpll; 2673 unsigned int dpll;
2647 const struct skl_dpll_regs *regs = skl_dpll_regs; 2674 const struct skl_dpll_regs *regs = skl_dpll_regs;
2675 bool ret;
2648 2676
2649 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2677 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2650 return false; 2678 return false;
2651 2679
2680 ret = false;
2681
2652 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ 2682 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
2653 dpll = pll->id + 1; 2683 dpll = pll->id + 1;
2654 2684
2655 val = I915_READ(regs[pll->id].ctl); 2685 val = I915_READ(regs[pll->id].ctl);
2656 if (!(val & LCPLL_PLL_ENABLE)) 2686 if (!(val & LCPLL_PLL_ENABLE))
2657 return false; 2687 goto out;
2658 2688
2659 val = I915_READ(DPLL_CTRL1); 2689 val = I915_READ(DPLL_CTRL1);
2660 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; 2690 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2664,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2664 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 2694 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
2665 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 2695 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
2666 } 2696 }
2697 ret = true;
2667 2698
2668 return true; 2699out:
2700 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2701
2702 return ret;
2669} 2703}
2670 2704
2671static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) 2705static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2932,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2932{ 2966{
2933 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ 2967 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
2934 uint32_t val; 2968 uint32_t val;
2969 bool ret;
2935 2970
2936 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2971 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2937 return false; 2972 return false;
2938 2973
2974 ret = false;
2975
2939 val = I915_READ(BXT_PORT_PLL_ENABLE(port)); 2976 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
2940 if (!(val & PORT_PLL_ENABLE)) 2977 if (!(val & PORT_PLL_ENABLE))
2941 return false; 2978 goto out;
2942 2979
2943 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); 2980 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
2944 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; 2981 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2985,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2985 I915_READ(BXT_PORT_PCS_DW12_LN23(port))); 3022 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
2986 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; 3023 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2987 3024
2988 return true; 3025 ret = true;
3026
3027out:
3028 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
3029
3030 return ret;
2989} 3031}
2990 3032
2991static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) 3033static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3120,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
3120{ 3162{
3121 u32 temp; 3163 u32 temp;
3122 3164
3123 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 3165 if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
3124 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 3166 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
3167
3168 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
3169
3125 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) 3170 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
3126 return true; 3171 return true;
3127 } 3172 }
3173
3128 return false; 3174 return false;
3129} 3175}
3130 3176
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb65725c04..46947fffd599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1351 bool cur_state; 1351 bool cur_state;
1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1353 pipe); 1353 pipe);
1354 enum intel_display_power_domain power_domain;
1354 1355
1355 /* if we need the pipe quirk it must be always on */ 1356 /* if we need the pipe quirk it must be always on */
1356 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1357 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1357 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1358 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1358 state = true; 1359 state = true;
1359 1360
1360 if (!intel_display_power_is_enabled(dev_priv, 1361 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1361 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1362 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1362 cur_state = false;
1363 } else {
1364 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1363 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1365 cur_state = !!(val & PIPECONF_ENABLE); 1364 cur_state = !!(val & PIPECONF_ENABLE);
1365
1366 intel_display_power_put(dev_priv, power_domain);
1367 } else {
1368 cur_state = false;
1366 } 1369 }
1367 1370
1368 I915_STATE_WARN(cur_state != state, 1371 I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8171{ 8174{
8172 struct drm_device *dev = crtc->base.dev; 8175 struct drm_device *dev = crtc->base.dev;
8173 struct drm_i915_private *dev_priv = dev->dev_private; 8176 struct drm_i915_private *dev_priv = dev->dev_private;
8177 enum intel_display_power_domain power_domain;
8174 uint32_t tmp; 8178 uint32_t tmp;
8179 bool ret;
8175 8180
8176 if (!intel_display_power_is_enabled(dev_priv, 8181 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8177 POWER_DOMAIN_PIPE(crtc->pipe))) 8182 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8178 return false; 8183 return false;
8179 8184
8180 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8185 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8181 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8186 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8182 8187
8188 ret = false;
8189
8183 tmp = I915_READ(PIPECONF(crtc->pipe)); 8190 tmp = I915_READ(PIPECONF(crtc->pipe));
8184 if (!(tmp & PIPECONF_ENABLE)) 8191 if (!(tmp & PIPECONF_ENABLE))
8185 return false; 8192 goto out;
8186 8193
8187 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8194 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8188 switch (tmp & PIPECONF_BPC_MASK) { 8195 switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8262 pipe_config->base.adjusted_mode.crtc_clock = 8269 pipe_config->base.adjusted_mode.crtc_clock =
8263 pipe_config->port_clock / pipe_config->pixel_multiplier; 8270 pipe_config->port_clock / pipe_config->pixel_multiplier;
8264 8271
8265 return true; 8272 ret = true;
8273
8274out:
8275 intel_display_power_put(dev_priv, power_domain);
8276
8277 return ret;
8266} 8278}
8267 8279
8268static void ironlake_init_pch_refclk(struct drm_device *dev) 8280static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9366{ 9378{
9367 struct drm_device *dev = crtc->base.dev; 9379 struct drm_device *dev = crtc->base.dev;
9368 struct drm_i915_private *dev_priv = dev->dev_private; 9380 struct drm_i915_private *dev_priv = dev->dev_private;
9381 enum intel_display_power_domain power_domain;
9369 uint32_t tmp; 9382 uint32_t tmp;
9383 bool ret;
9370 9384
9371 if (!intel_display_power_is_enabled(dev_priv, 9385 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9372 POWER_DOMAIN_PIPE(crtc->pipe))) 9386 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9373 return false; 9387 return false;
9374 9388
9375 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9389 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9376 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9390 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9377 9391
9392 ret = false;
9378 tmp = I915_READ(PIPECONF(crtc->pipe)); 9393 tmp = I915_READ(PIPECONF(crtc->pipe));
9379 if (!(tmp & PIPECONF_ENABLE)) 9394 if (!(tmp & PIPECONF_ENABLE))
9380 return false; 9395 goto out;
9381 9396
9382 switch (tmp & PIPECONF_BPC_MASK) { 9397 switch (tmp & PIPECONF_BPC_MASK) {
9383 case PIPECONF_6BPC: 9398 case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9440 9455
9441 ironlake_get_pfit_config(crtc, pipe_config); 9456 ironlake_get_pfit_config(crtc, pipe_config);
9442 9457
9443 return true; 9458 ret = true;
9459
9460out:
9461 intel_display_power_put(dev_priv, power_domain);
9462
9463 return ret;
9444} 9464}
9445 9465
9446static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9466static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9950{ 9970{
9951 struct drm_device *dev = crtc->base.dev; 9971 struct drm_device *dev = crtc->base.dev;
9952 struct drm_i915_private *dev_priv = dev->dev_private; 9972 struct drm_i915_private *dev_priv = dev->dev_private;
9953 enum intel_display_power_domain pfit_domain; 9973 enum intel_display_power_domain power_domain;
9974 unsigned long power_domain_mask;
9954 uint32_t tmp; 9975 uint32_t tmp;
9976 bool ret;
9955 9977
9956 if (!intel_display_power_is_enabled(dev_priv, 9978 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9957 POWER_DOMAIN_PIPE(crtc->pipe))) 9979 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9958 return false; 9980 return false;
9981 power_domain_mask = BIT(power_domain);
9982
9983 ret = false;
9959 9984
9960 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9961 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9986 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9982 pipe_config->cpu_transcoder = TRANSCODER_EDP; 10007 pipe_config->cpu_transcoder = TRANSCODER_EDP;
9983 } 10008 }
9984 10009
9985 if (!intel_display_power_is_enabled(dev_priv, 10010 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9986 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 10011 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9987 return false; 10012 goto out;
10013 power_domain_mask |= BIT(power_domain);
9988 10014
9989 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10015 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9990 if (!(tmp & PIPECONF_ENABLE)) 10016 if (!(tmp & PIPECONF_ENABLE))
9991 return false; 10017 goto out;
9992 10018
9993 haswell_get_ddi_port_state(crtc, pipe_config); 10019 haswell_get_ddi_port_state(crtc, pipe_config);
9994 10020
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9998 skl_init_scalers(dev, crtc, pipe_config); 10024 skl_init_scalers(dev, crtc, pipe_config);
9999 } 10025 }
10000 10026
10001 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10002
10003 if (INTEL_INFO(dev)->gen >= 9) { 10027 if (INTEL_INFO(dev)->gen >= 9) {
10004 pipe_config->scaler_state.scaler_id = -1; 10028 pipe_config->scaler_state.scaler_id = -1;
10005 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 10029 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10006 } 10030 }
10007 10031
10008 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 10032 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10033 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10034 power_domain_mask |= BIT(power_domain);
10009 if (INTEL_INFO(dev)->gen >= 9) 10035 if (INTEL_INFO(dev)->gen >= 9)
10010 skylake_get_pfit_config(crtc, pipe_config); 10036 skylake_get_pfit_config(crtc, pipe_config);
10011 else 10037 else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10023 pipe_config->pixel_multiplier = 1; 10049 pipe_config->pixel_multiplier = 1;
10024 } 10050 }
10025 10051
10026 return true; 10052 ret = true;
10053
10054out:
10055 for_each_power_domain(power_domain, power_domain_mask)
10056 intel_display_power_put(dev_priv, power_domain);
10057
10058 return ret;
10027} 10059}
10028 10060
10029static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) 10061static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13630{ 13662{
13631 uint32_t val; 13663 uint32_t val;
13632 13664
13633 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 13665 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13634 return false; 13666 return false;
13635 13667
13636 val = I915_READ(PCH_DPLL(pll->id)); 13668 val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13638 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 13670 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13639 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 13671 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13640 13672
13673 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13674
13641 return val & DPLL_VCO_ENABLE; 13675 return val & DPLL_VCO_ENABLE;
13642} 13676}
13643 13677
@@ -15568,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
15568 * level, just check if the power well is enabled instead of trying to 15602 * level, just check if the power well is enabled instead of trying to
15569 * follow the "don't touch the power well if we don't need it" policy 15603 * follow the "don't touch the power well if we don't need it" policy
15570 * the rest of the driver uses. */ 15604 * the rest of the driver uses. */
15571 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15605 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15572 return; 15606 return;
15573 15607
15574 i915_redisable_vga_power_on(dev); 15608 i915_redisable_vga_power_on(dev);
15609
15610 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15575} 15611}
15576 15612
15577static bool primary_get_hw_state(struct intel_plane *plane) 15613static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d313cb9..1d8de43bed56 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2362 struct drm_i915_private *dev_priv = dev->dev_private; 2362 struct drm_i915_private *dev_priv = dev->dev_private;
2363 enum intel_display_power_domain power_domain; 2363 enum intel_display_power_domain power_domain;
2364 u32 tmp; 2364 u32 tmp;
2365 bool ret;
2365 2366
2366 power_domain = intel_display_port_power_domain(encoder); 2367 power_domain = intel_display_port_power_domain(encoder);
2367 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2368 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2368 return false; 2369 return false;
2369 2370
2371 ret = false;
2372
2370 tmp = I915_READ(intel_dp->output_reg); 2373 tmp = I915_READ(intel_dp->output_reg);
2371 2374
2372 if (!(tmp & DP_PORT_EN)) 2375 if (!(tmp & DP_PORT_EN))
2373 return false; 2376 goto out;
2374 2377
2375 if (IS_GEN7(dev) && port == PORT_A) { 2378 if (IS_GEN7(dev) && port == PORT_A) {
2376 *pipe = PORT_TO_PIPE_CPT(tmp); 2379 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2381 u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); 2384 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2382 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { 2385 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2383 *pipe = p; 2386 *pipe = p;
2384 return true; 2387 ret = true;
2388
2389 goto out;
2385 } 2390 }
2386 } 2391 }
2387 2392
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2393 *pipe = PORT_TO_PIPE(tmp); 2398 *pipe = PORT_TO_PIPE(tmp);
2394 } 2399 }
2395 2400
2396 return true; 2401 ret = true;
2402
2403out:
2404 intel_display_power_put(dev_priv, power_domain);
2405
2406 return ret;
2397} 2407}
2398 2408
2399static void intel_dp_get_config(struct intel_encoder *encoder, 2409static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4493,20 +4503,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4493 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4503 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4494} 4504}
4495 4505
4496static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, 4506static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4497 struct intel_digital_port *port) 4507 struct intel_digital_port *port)
4498{ 4508{
4499 u32 bit; 4509 u32 bit;
4500 4510
4501 switch (port->port) { 4511 switch (port->port) {
4502 case PORT_B: 4512 case PORT_B:
4503 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4513 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4504 break; 4514 break;
4505 case PORT_C: 4515 case PORT_C:
4506 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4516 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4507 break; 4517 break;
4508 case PORT_D: 4518 case PORT_D:
4509 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4519 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4510 break; 4520 break;
4511 default: 4521 default:
4512 MISSING_CASE(port->port); 4522 MISSING_CASE(port->port);
@@ -4558,8 +4568,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4558 return cpt_digital_port_connected(dev_priv, port); 4568 return cpt_digital_port_connected(dev_priv, port);
4559 else if (IS_BROXTON(dev_priv)) 4569 else if (IS_BROXTON(dev_priv))
4560 return bxt_digital_port_connected(dev_priv, port); 4570 return bxt_digital_port_connected(dev_priv, port);
4561 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4571 else if (IS_GM45(dev_priv))
4562 return vlv_digital_port_connected(dev_priv, port); 4572 return gm45_digital_port_connected(dev_priv, port);
4563 else 4573 else
4564 return g4x_digital_port_connected(dev_priv, port); 4574 return g4x_digital_port_connected(dev_priv, port);
4565} 4575}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea5415851c6e..df7f3cb66056 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1428 enum intel_display_power_domain domain); 1428 enum intel_display_power_domain domain);
1429void intel_display_power_get(struct drm_i915_private *dev_priv, 1429void intel_display_power_get(struct drm_i915_private *dev_priv,
1430 enum intel_display_power_domain domain); 1430 enum intel_display_power_domain domain);
1431bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain);
1431void intel_display_power_put(struct drm_i915_private *dev_priv, 1433void intel_display_power_put(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain); 1434 enum intel_display_power_domain domain);
1433 1435
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
1514 enable_rpm_wakeref_asserts(dev_priv) 1516 enable_rpm_wakeref_asserts(dev_priv)
1515 1517
1516void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 1518void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1519bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
1517void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); 1520void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1518void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 1521void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1519 1522
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa2f616..0193c62a53ef 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
664 struct drm_device *dev = encoder->base.dev; 664 struct drm_device *dev = encoder->base.dev;
665 enum intel_display_power_domain power_domain; 665 enum intel_display_power_domain power_domain;
666 enum port port; 666 enum port port;
667 bool ret;
667 668
668 DRM_DEBUG_KMS("\n"); 669 DRM_DEBUG_KMS("\n");
669 670
670 power_domain = intel_display_port_power_domain(encoder); 671 power_domain = intel_display_port_power_domain(encoder);
671 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 672 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
672 return false; 673 return false;
673 674
675 ret = false;
676
674 /* XXX: this only works for one DSI output */ 677 /* XXX: this only works for one DSI output */
675 for_each_dsi_port(port, intel_dsi->ports) { 678 for_each_dsi_port(port, intel_dsi->ports) {
676 i915_reg_t ctrl_reg = IS_BROXTON(dev) ? 679 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
691 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { 694 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
692 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { 695 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
693 *pipe = port == PORT_A ? PIPE_A : PIPE_B; 696 *pipe = port == PORT_A ? PIPE_A : PIPE_B;
694 return true; 697 ret = true;
698
699 goto out;
695 } 700 }
696 } 701 }
697 } 702 }
703out:
704 intel_display_power_put(dev_priv, power_domain);
698 705
699 return false; 706 return ret;
700} 707}
701 708
702static void intel_dsi_get_config(struct intel_encoder *encoder, 709static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639a489d..cb5d1b15755c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
881 enum intel_display_power_domain power_domain; 881 enum intel_display_power_domain power_domain;
882 u32 tmp; 882 u32 tmp;
883 bool ret;
883 884
884 power_domain = intel_display_port_power_domain(encoder); 885 power_domain = intel_display_port_power_domain(encoder);
885 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 886 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
886 return false; 887 return false;
887 888
889 ret = false;
890
888 tmp = I915_READ(intel_hdmi->hdmi_reg); 891 tmp = I915_READ(intel_hdmi->hdmi_reg);
889 892
890 if (!(tmp & SDVO_ENABLE)) 893 if (!(tmp & SDVO_ENABLE))
891 return false; 894 goto out;
892 895
893 if (HAS_PCH_CPT(dev)) 896 if (HAS_PCH_CPT(dev))
894 *pipe = PORT_TO_PIPE_CPT(tmp); 897 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
897 else 900 else
898 *pipe = PORT_TO_PIPE(tmp); 901 *pipe = PORT_TO_PIPE(tmp);
899 902
900 return true; 903 ret = true;
904
905out:
906 intel_display_power_put(dev_priv, power_domain);
907
908 return ret;
901} 909}
902 910
903static void intel_hdmi_get_config(struct intel_encoder *encoder, 911static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240caf81..bc04d8d29acb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
76 enum intel_display_power_domain power_domain; 76 enum intel_display_power_domain power_domain;
77 u32 tmp; 77 u32 tmp;
78 bool ret;
78 79
79 power_domain = intel_display_port_power_domain(encoder); 80 power_domain = intel_display_port_power_domain(encoder);
80 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 81 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
81 return false; 82 return false;
82 83
84 ret = false;
85
83 tmp = I915_READ(lvds_encoder->reg); 86 tmp = I915_READ(lvds_encoder->reg);
84 87
85 if (!(tmp & LVDS_PORT_EN)) 88 if (!(tmp & LVDS_PORT_EN))
86 return false; 89 goto out;
87 90
88 if (HAS_PCH_CPT(dev)) 91 if (HAS_PCH_CPT(dev))
89 *pipe = PORT_TO_PIPE_CPT(tmp); 92 *pipe = PORT_TO_PIPE_CPT(tmp);
90 else 93 else
91 *pipe = PORT_TO_PIPE(tmp); 94 *pipe = PORT_TO_PIPE(tmp);
92 95
93 return true; 96 ret = true;
97
98out:
99 intel_display_power_put(dev_priv, power_domain);
100
101 return ret;
94} 102}
95 103
96static void intel_lvds_get_config(struct intel_encoder *encoder, 104static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a234687792f0..b28c29f20e75 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2829,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2829 memset(ddb, 0, sizeof(*ddb)); 2829 memset(ddb, 0, sizeof(*ddb));
2830 2830
2831 for_each_pipe(dev_priv, pipe) { 2831 for_each_pipe(dev_priv, pipe) {
2832 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) 2832 enum intel_display_power_domain power_domain;
2833
2834 power_domain = POWER_DOMAIN_PIPE(pipe);
2835 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2833 continue; 2836 continue;
2834 2837
2835 for_each_plane(dev_priv, pipe, plane) { 2838 for_each_plane(dev_priv, pipe, plane) {
@@ -2841,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2841 val = I915_READ(CUR_BUF_CFG(pipe)); 2844 val = I915_READ(CUR_BUF_CFG(pipe));
2842 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2845 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2843 val); 2846 val);
2847
2848 intel_display_power_put(dev_priv, power_domain);
2844 } 2849 }
2845} 2850}
2846 2851
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbffe829a..4f43d9b32e66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
470 } 470 }
471} 471}
472 472
473static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
474 u32 state)
475{
476 int rewrites = 0;
477 int rereads = 0;
478 u32 v;
479
480 I915_WRITE(DC_STATE_EN, state);
481
482 /* It has been observed that disabling the dc6 state sometimes
483 * doesn't stick and dmc keeps returning old value. Make sure
484 * the write really sticks enough times and also force rewrite until
485 * we are confident that state is exactly what we want.
486 */
487 do {
488 v = I915_READ(DC_STATE_EN);
489
490 if (v != state) {
491 I915_WRITE(DC_STATE_EN, state);
492 rewrites++;
493 rereads = 0;
494 } else if (rereads++ > 5) {
495 break;
496 }
497
498 } while (rewrites < 100);
499
500 if (v != state)
501 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
502 state, v);
503
504 /* Most of the times we need one retry, avoid spam */
505 if (rewrites > 1)
506 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
507 state, rewrites);
508}
509
473static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 510static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
474{ 511{
475 uint32_t val; 512 uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
494 val = I915_READ(DC_STATE_EN); 531 val = I915_READ(DC_STATE_EN);
495 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 532 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
496 val & mask, state); 533 val & mask, state);
534
535 /* Check if DMC is ignoring our DC state requests */
536 if ((val & mask) != dev_priv->csr.dc_state)
537 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
538 dev_priv->csr.dc_state, val & mask);
539
497 val &= ~mask; 540 val &= ~mask;
498 val |= state; 541 val |= state;
499 I915_WRITE(DC_STATE_EN, val); 542
500 POSTING_READ(DC_STATE_EN); 543 gen9_write_dc_state(dev_priv, val);
544
545 dev_priv->csr.dc_state = val & mask;
501} 546}
502 547
503void bxt_enable_dc9(struct drm_i915_private *dev_priv) 548void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1442 chv_set_pipe_power_well(dev_priv, power_well, false); 1487 chv_set_pipe_power_well(dev_priv, power_well, false);
1443} 1488}
1444 1489
1490static void
1491__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1492 enum intel_display_power_domain domain)
1493{
1494 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1495 struct i915_power_well *power_well;
1496 int i;
1497
1498 for_each_power_well(i, power_well, BIT(domain), power_domains) {
1499 if (!power_well->count++)
1500 intel_power_well_enable(dev_priv, power_well);
1501 }
1502
1503 power_domains->domain_use_count[domain]++;
1504}
1505
1445/** 1506/**
1446 * intel_display_power_get - grab a power domain reference 1507 * intel_display_power_get - grab a power domain reference
1447 * @dev_priv: i915 device instance 1508 * @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1457void intel_display_power_get(struct drm_i915_private *dev_priv, 1518void intel_display_power_get(struct drm_i915_private *dev_priv,
1458 enum intel_display_power_domain domain) 1519 enum intel_display_power_domain domain)
1459{ 1520{
1460 struct i915_power_domains *power_domains; 1521 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1461 struct i915_power_well *power_well;
1462 int i;
1463 1522
1464 intel_runtime_pm_get(dev_priv); 1523 intel_runtime_pm_get(dev_priv);
1465 1524
1466 power_domains = &dev_priv->power_domains; 1525 mutex_lock(&power_domains->lock);
1526
1527 __intel_display_power_get_domain(dev_priv, domain);
1528
1529 mutex_unlock(&power_domains->lock);
1530}
1531
1532/**
1533 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1534 * @dev_priv: i915 device instance
1535 * @domain: power domain to reference
1536 *
1537 * This function grabs a power domain reference for @domain and ensures that the
1538 * power domain and all its parents are powered up. Therefore users should only
1539 * grab a reference to the innermost power domain they need.
1540 *
1541 * Any power domain reference obtained by this function must have a symmetric
1542 * call to intel_display_power_put() to release the reference again.
1543 */
1544bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1545 enum intel_display_power_domain domain)
1546{
1547 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1548 bool is_enabled;
1549
1550 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1551 return false;
1467 1552
1468 mutex_lock(&power_domains->lock); 1553 mutex_lock(&power_domains->lock);
1469 1554
1470 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1555 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1471 if (!power_well->count++) 1556 __intel_display_power_get_domain(dev_priv, domain);
1472 intel_power_well_enable(dev_priv, power_well); 1557 is_enabled = true;
1558 } else {
1559 is_enabled = false;
1473 } 1560 }
1474 1561
1475 power_domains->domain_use_count[domain]++;
1476
1477 mutex_unlock(&power_domains->lock); 1562 mutex_unlock(&power_domains->lock);
1563
1564 if (!is_enabled)
1565 intel_runtime_pm_put(dev_priv);
1566
1567 return is_enabled;
1478} 1568}
1479 1569
1480/** 1570/**
@@ -2213,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2213 */ 2303 */
2214void intel_power_domains_suspend(struct drm_i915_private *dev_priv) 2304void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2215{ 2305{
2216 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2217 skl_display_core_uninit(dev_priv);
2218
2219 /* 2306 /*
2220 * Even if power well support was disabled we still want to disable 2307 * Even if power well support was disabled we still want to disable
2221 * power wells while we are system suspended. 2308 * power wells while we are system suspended.
2222 */ 2309 */
2223 if (!i915.disable_power_well) 2310 if (!i915.disable_power_well)
2224 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 2311 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2312
2313 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2314 skl_display_core_uninit(dev_priv);
2225} 2315}
2226 2316
2227/** 2317/**
@@ -2246,6 +2336,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2246} 2336}
2247 2337
2248/** 2338/**
2339 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2340 * @dev_priv: i915 device instance
2341 *
2342 * This function grabs a device-level runtime pm reference if the device is
2343 * already in use and ensures that it is powered up.
2344 *
2345 * Any runtime pm reference obtained by this function must have a symmetric
2346 * call to intel_runtime_pm_put() to release the reference again.
2347 */
2348bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2349{
2350 struct drm_device *dev = dev_priv->dev;
2351 struct device *device = &dev->pdev->dev;
2352
2353 if (IS_ENABLED(CONFIG_PM)) {
2354 int ret = pm_runtime_get_if_in_use(device);
2355
2356 /*
2357 * In cases runtime PM is disabled by the RPM core and we get
2358 * an -EINVAL return value we are not supposed to call this
2359 * function, since the power state is undefined. This applies
2360 * atm to the late/early system suspend/resume handlers.
2361 */
2362 WARN_ON_ONCE(ret < 0);
2363 if (ret <= 0)
2364 return false;
2365 }
2366
2367 atomic_inc(&dev_priv->pm.wakeref_count);
2368 assert_rpm_wakelock_held(dev_priv);
2369
2370 return true;
2371}
2372
2373/**
2249 * intel_runtime_pm_get_noresume - grab a runtime pm reference 2374 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2250 * @dev_priv: i915 device instance 2375 * @dev_priv: i915 device instance
2251 * 2376 *
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d05de9..e3acc35e3805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1520 DMA_BIDIRECTIONAL); 1520 DMA_BIDIRECTIONAL);
1521 1521
1522 if (dma_mapping_error(pdev, addr)) { 1522 if (dma_mapping_error(pdev, addr)) {
1523 while (--i) { 1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i], 1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL); 1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0; 1526 ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d3cd18..20935eb2a09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
635 nv_crtc->lut.depth = 0; 635 nv_crtc->lut.depth = 0;
636 } 636 }
637 637
638 /* Make sure that drm and hw vblank irqs get resumed if needed. */
639 for (head = 0; head < dev->mode_config.num_crtc; head++)
640 drm_vblank_on(dev, head);
641
642 /* This should ensure we don't hit a locking problem when someone 638 /* This should ensure we don't hit a locking problem when someone
643 * wakes us up via a connector. We should never go into suspend 639 * wakes us up via a connector. We should never go into suspend
644 * while the display is on anyways. 640 * while the display is on anyways.
@@ -648,6 +644,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
648 644
649 drm_helper_resume_force_mode(dev); 645 drm_helper_resume_force_mode(dev);
650 646
647 /* Make sure that drm and hw vblank irqs get resumed if needed. */
648 for (head = 0; head < dev->mode_config.num_crtc; head++)
649 drm_vblank_on(dev, head);
650
651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
653 653
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
24static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
25{ 25{
26 const struct nvkm_device_tegra_func *func; 26 const struct nvkm_device_tegra_func *func;
27 struct nvkm_device *device; 27 struct nvkm_device *device = NULL;
28 struct drm_device *drm; 28 struct drm_device *drm;
29 int ret; 29 int ret;
30 30
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
252 252
253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) 253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
254 return -ENOMEM; 254 return -ENOMEM;
255 *pdevice = &tdev->device; 255
256 tdev->func = func; 256 tdev->func = func;
257 tdev->pdev = pdev; 257 tdev->pdev = pdev;
258 tdev->irq = -1; 258 tdev->irq = -1;
259 259
260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); 260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
261 if (IS_ERR(tdev->vdd)) 261 if (IS_ERR(tdev->vdd)) {
262 return PTR_ERR(tdev->vdd); 262 ret = PTR_ERR(tdev->vdd);
263 goto free;
264 }
263 265
264 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); 266 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
265 if (IS_ERR(tdev->rst)) 267 if (IS_ERR(tdev->rst)) {
266 return PTR_ERR(tdev->rst); 268 ret = PTR_ERR(tdev->rst);
269 goto free;
270 }
267 271
268 tdev->clk = devm_clk_get(&pdev->dev, "gpu"); 272 tdev->clk = devm_clk_get(&pdev->dev, "gpu");
269 if (IS_ERR(tdev->clk)) 273 if (IS_ERR(tdev->clk)) {
270 return PTR_ERR(tdev->clk); 274 ret = PTR_ERR(tdev->clk);
275 goto free;
276 }
271 277
272 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 278 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
273 if (IS_ERR(tdev->clk_pwr)) 279 if (IS_ERR(tdev->clk_pwr)) {
274 return PTR_ERR(tdev->clk_pwr); 280 ret = PTR_ERR(tdev->clk_pwr);
281 goto free;
282 }
275 283
276 nvkm_device_tegra_probe_iommu(tdev); 284 nvkm_device_tegra_probe_iommu(tdev);
277 285
278 ret = nvkm_device_tegra_power_up(tdev); 286 ret = nvkm_device_tegra_power_up(tdev);
279 if (ret) 287 if (ret)
280 return ret; 288 goto remove;
281 289
282 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; 290 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
283 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, 291 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
285 cfg, dbg, detect, mmio, subdev_mask, 293 cfg, dbg, detect, mmio, subdev_mask,
286 &tdev->device); 294 &tdev->device);
287 if (ret) 295 if (ret)
288 return ret; 296 goto powerdown;
297
298 *pdevice = &tdev->device;
289 299
290 return 0; 300 return 0;
301
302powerdown:
303 nvkm_device_tegra_power_down(tdev);
304remove:
305 nvkm_device_tegra_remove_iommu(tdev);
306free:
307 kfree(tdev);
308 return ret;
291} 309}
292#else 310#else
293int 311int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
328 .outp = outp, 328 .outp = outp,
329 }, *dp = &_dp; 329 }, *dp = &_dp;
330 u32 datarate = 0; 330 u32 datarate = 0;
331 u8 pwr;
331 int ret; 332 int ret;
332 333
333 if (!outp->base.info.location && disp->func->sor.magic) 334 if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
355 /* disable link interrupt handling during link training */ 356 /* disable link interrupt handling during link training */
356 nvkm_notify_put(&outp->irq); 357 nvkm_notify_put(&outp->irq);
357 358
359 /* ensure sink is not in a low-power state */
360 if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
361 if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
362 pwr &= ~DPCD_SC00_SET_POWER;
363 pwr |= DPCD_SC00_SET_POWER_D0;
364 nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
365 }
366 }
367
358 /* enable down-spreading and execute pre-train script from vbios */ 368 /* enable down-spreading and execute pre-train script from vbios */
359 dp_link_train_init(dp, outp->dpcd[3] & 0x01); 369 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
360 370
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c 71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03 72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
73 73
74/* DPCD Sink Control */
75#define DPCD_SC00 0x00600
76#define DPCD_SC00_SET_POWER 0x03
77#define DPCD_SC00_SET_POWER_D0 0x01
78#define DPCD_SC00_SET_POWER_D3 0x03
79
74void nvkm_dp_train(struct work_struct *); 80void nvkm_dp_train(struct work_struct *);
75#endif 81#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b50a8fd..9f029dda1f07 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area) 68 struct vm_area_struct *area)
69{ 69{
70 WARN_ONCE(1, "not implemented"); 70 WARN_ONCE(1, "not implemented");
71 return ENOSYS; 71 return -ENOSYS;
72} 72}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 298ea1c453c3..2b9ba03a7c1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
403 struct drm_crtc *crtc = &radeon_crtc->base; 403 struct drm_crtc *crtc = &radeon_crtc->base;
404 unsigned long flags; 404 unsigned long flags;
405 int r; 405 int r;
406 int vpos, hpos, stat, min_udelay; 406 int vpos, hpos, stat, min_udelay = 0;
407 unsigned repcnt = 4;
407 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 408 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
408 409
409 down_read(&rdev->exclusive_lock); 410 down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
454 * In practice this won't execute very often unless on very fast 455 * In practice this won't execute very often unless on very fast
455 * machines because the time window for this to happen is very small. 456 * machines because the time window for this to happen is very small.
456 */ 457 */
457 for (;;) { 458 while (radeon_crtc->enabled && repcnt--) {
458 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 459 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
459 * start in hpos, and to the "fudged earlier" vblank start in 460 * start in hpos, and to the "fudged earlier" vblank start in
460 * vpos. 461 * vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
472 /* Sleep at least until estimated real start of hw vblank */ 473 /* Sleep at least until estimated real start of hw vblank */
473 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 474 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 475 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
476 if (min_udelay > vblank->framedur_ns / 2000) {
477 /* Don't wait ridiculously long - something is wrong */
478 repcnt = 0;
479 break;
480 }
475 usleep_range(min_udelay, 2 * min_udelay); 481 usleep_range(min_udelay, 2 * min_udelay);
476 spin_lock_irqsave(&crtc->dev->event_lock, flags); 482 spin_lock_irqsave(&crtc->dev->event_lock, flags);
477 }; 483 };
478 484
485 if (!repcnt)
486 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
487 "framedur %d, linedur %d, stat %d, vpos %d, "
488 "hpos %d\n", work->crtc_id, min_udelay,
489 vblank->framedur_ns / 1000,
490 vblank->linedur_ns / 1000, stat, vpos, hpos);
491
479 /* do the flip (mmio) */ 492 /* do the flip (mmio) */
480 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); 493 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
481 494
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2989da..0f14d897baf9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
276 if (rdev->irq.installed) { 276 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 277 for (i = 0; i < rdev->num_crtc; i++) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 278 if (rdev->pm.active_crtcs & (1 << i)) {
279 rdev->pm.req_vblank |= (1 << i); 279 /* This can fail if a modeset is in progress */
280 drm_vblank_get(rdev->ddev, i); 280 if (drm_vblank_get(rdev->ddev, i) == 0)
281 rdev->pm.req_vblank |= (1 << i);
282 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i);
281 } 285 }
282 } 286 }
283 } 287 }
@@ -1075,12 +1079,6 @@ force:
1075 1079
1076 /* update display watermarks based on new power state */ 1080 /* update display watermarks based on new power state */
1077 radeon_bandwidth_update(rdev); 1081 radeon_bandwidth_update(rdev);
1078 /* update displays */
1079 radeon_dpm_display_configuration_changed(rdev);
1080
1081 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1082 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1083 rdev->pm.dpm.single_display = single_display;
1084 1082
1085 /* wait for the rings to drain */ 1083 /* wait for the rings to drain */
1086 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1084 for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1097,6 +1095,13 @@ force:
1097 1095
1098 radeon_dpm_post_set_power_state(rdev); 1096 radeon_dpm_post_set_power_state(rdev);
1099 1097
1098 /* update displays */
1099 radeon_dpm_display_configuration_changed(rdev);
1100
1101 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1102 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1103 rdev->pm.dpm.single_display = single_display;
1104
1100 if (rdev->asic->dpm.force_performance_level) { 1105 if (rdev->asic->dpm.force_performance_level) {
1101 if (rdev->pm.dpm.thermal_active) { 1106 if (rdev->pm.dpm.thermal_active) {
1102 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1107 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
758 0, PAGE_SIZE, 758 0, PAGE_SIZE,
759 PCI_DMA_BIDIRECTIONAL); 759 PCI_DMA_BIDIRECTIONAL);
760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
761 while (--i) { 761 while (i--) {
762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
764 gtt->ttm.dma_address[i] = 0; 764 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
215 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
216 216
217 if (size == 0) 217 if (size == 0)
218 return NULL; 218 return ERR_PTR(-EINVAL);
219 219
220 /* First, try to get a vc4_bo from the kernel BO cache. */ 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) { 221 if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
237 if (IS_ERR(cma_obj)) { 237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n"); 238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4); 239 vc4_bo_stats_dump(vc4);
240 return NULL; 240 return ERR_PTR(-ENOMEM);
241 } 241 }
242 } 242 }
243 243
@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
259 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
260 260
261 bo = vc4_bo_create(dev, args->size, false); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo) 262 if (IS_ERR(bo))
263 return -ENOMEM; 263 return PTR_ERR(bo);
264 264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base); 266 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
443 * get zeroed, and that might leak data between users. 443 * get zeroed, and that might leak data between users.
444 */ 444 */
445 bo = vc4_bo_create(dev, args->size, false); 445 bo = vc4_bo_create(dev, args->size, false);
446 if (!bo) 446 if (IS_ERR(bo))
447 return -ENOMEM; 447 return PTR_ERR(bo);
448 448
449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
450 drm_gem_object_unreference_unlocked(&bo->base.base); 450 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
496 } 496 }
497 497
498 bo = vc4_bo_create(dev, args->size, true); 498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo) 499 if (IS_ERR(bo))
500 return -ENOMEM; 500 return PTR_ERR(bo);
501 501
502 ret = copy_from_user(bo->base.vaddr, 502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data, 503 (void __user *)(uintptr_t)args->data,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865ec2bae..51a63330d4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
91 struct vc4_bo *overflow_mem; 91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work; 92 struct work_struct overflow_mem_work;
93 93
94 int power_refcount;
95
96 /* Mutex controlling the power refcount. */
97 struct mutex power_lock;
98
94 struct { 99 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer; 100 struct timer_list timer;
97 struct work_struct reset_work; 101 struct work_struct reset_work;
98 } hangcheck; 102 } hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
142}; 146};
143 147
144struct vc4_v3d { 148struct vc4_v3d {
149 struct vc4_dev *vc4;
145 struct platform_device *pdev; 150 struct platform_device *pdev;
146 void __iomem *regs; 151 void __iomem *regs;
147}; 152};
@@ -192,6 +197,11 @@ struct vc4_exec_info {
192 /* Sequence number for this bin/render job. */ 197 /* Sequence number for this bin/render job. */
193 uint64_t seqno; 198 uint64_t seqno;
194 199
200 /* Last current addresses the hardware was processing when the
201 * hangcheck timer checked on us.
202 */
203 uint32_t last_ct0ca, last_ct1ca;
204
195 /* Kernel-space copy of the ioctl arguments */ 205 /* Kernel-space copy of the ioctl arguments */
196 struct drm_vc4_submit_cl *args; 206 struct drm_vc4_submit_cl *args;
197 207
@@ -434,7 +444,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
434extern struct platform_driver vc4_v3d_driver; 444extern struct platform_driver vc4_v3d_driver;
435int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 445int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
436int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 446int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
437int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
438 447
439/* vc4_validate.c */ 448/* vc4_validate.c */
440int 449int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
228 struct vc4_dev *vc4 = to_vc4_dev(dev); 229 struct vc4_dev *vc4 = to_vc4_dev(dev);
229 230
230 DRM_INFO("Resetting GPU.\n"); 231 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false); 232
232 vc4_v3d_set_power(vc4, true); 233 mutex_lock(&vc4->power_lock);
234 if (vc4->power_refcount) {
235 /* Power the device off and back on the by dropping the
236 * reference on runtime PM.
237 */
238 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
239 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
240 }
241 mutex_unlock(&vc4->power_lock);
233 242
234 vc4_irq_reset(dev); 243 vc4_irq_reset(dev);
235 244
@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
257 struct drm_device *dev = (struct drm_device *)data; 266 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev); 267 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca; 268 uint32_t ct0ca, ct1ca;
269 unsigned long irqflags;
270 struct vc4_exec_info *exec;
271
272 spin_lock_irqsave(&vc4->job_lock, irqflags);
273 exec = vc4_first_job(vc4);
260 274
261 /* If idle, we can stop watching for hangs. */ 275 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list)) 276 if (!exec) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
263 return; 278 return;
279 }
264 280
265 ct0ca = V3D_READ(V3D_CTNCA(0)); 281 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1)); 282 ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
268 /* If we've made any progress in execution, rearm the timer 284 /* If we've made any progress in execution, rearm the timer
269 * and wait. 285 * and wait.
270 */ 286 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca || 287 if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
272 ct1ca != vc4->hangcheck.last_ct1ca) { 288 exec->last_ct0ca = ct0ca;
273 vc4->hangcheck.last_ct0ca = ct0ca; 289 exec->last_ct1ca = ct1ca;
274 vc4->hangcheck.last_ct1ca = ct1ca; 290 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 vc4_queue_hangcheck(dev); 291 vc4_queue_hangcheck(dev);
276 return; 292 return;
277 } 293 }
278 294
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296
279 /* We've gone too long with no progress, reset. This has to 297 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and 298 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to. 299 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
340 finish_wait(&vc4->job_wait_queue, &wait); 358 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno); 359 trace_vc4_wait_for_seqno_end(dev, seqno);
342 360
343 if (ret && ret != -ERESTARTSYS) { 361 return ret;
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349} 362}
350 363
351static void 364static void
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
578 } 591 }
579 592
580 bo = vc4_bo_create(dev, exec_size, true); 593 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) { 594 if (IS_ERR(bo)) {
582 DRM_ERROR("Couldn't allocate BO for binning\n"); 595 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM; 596 ret = PTR_ERR(bo);
584 goto fail; 597 goto fail;
585 } 598 }
586 exec->exec_bo = &bo->base; 599 exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@ fail:
617static void 630static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) 631vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{ 632{
633 struct vc4_dev *vc4 = to_vc4_dev(dev);
620 unsigned i; 634 unsigned i;
621 635
622 /* Need the struct lock for drm_gem_object_unreference(). */ 636 /* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
635 } 649 }
636 mutex_unlock(&dev->struct_mutex); 650 mutex_unlock(&dev->struct_mutex);
637 651
652 mutex_lock(&vc4->power_lock);
653 if (--vc4->power_refcount == 0)
654 pm_runtime_put(&vc4->v3d->pdev->dev);
655 mutex_unlock(&vc4->power_lock);
656
638 kfree(exec); 657 kfree(exec);
639} 658}
640 659
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
746 struct drm_gem_object *gem_obj; 765 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo; 766 struct vc4_bo *bo;
748 767
768 if (args->pad != 0)
769 return -EINVAL;
770
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); 771 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) { 772 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 773 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
772 struct vc4_dev *vc4 = to_vc4_dev(dev); 794 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data; 795 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec; 796 struct vc4_exec_info *exec;
775 int ret; 797 int ret = 0;
776 798
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 799 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 800 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
785 return -ENOMEM; 807 return -ENOMEM;
786 } 808 }
787 809
810 mutex_lock(&vc4->power_lock);
811 if (vc4->power_refcount++ == 0)
812 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
813 mutex_unlock(&vc4->power_lock);
814 if (ret < 0) {
815 kfree(exec);
816 return ret;
817 }
818
788 exec->args = args; 819 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list); 820 INIT_LIST_HEAD(&exec->unref_list);
790 821
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev)
839 (unsigned long)dev); 870 (unsigned long)dev);
840 871
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 872 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
873
874 mutex_init(&vc4->power_lock);
842} 875}
843 876
844void 877void
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
57 struct vc4_bo *bo; 57 struct vc4_bo *bo;
58 58
59 bo = vc4_bo_create(dev, 256 * 1024, true); 59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) { 60 if (IS_ERR(bo)) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n"); 61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return; 62 return;
63 } 63 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
316 size += xtiles * ytiles * loop_body_size; 316 size += xtiles * ytiles * loop_body_size;
317 317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base; 318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl) 319 if (IS_ERR(setup->rcl))
320 return -ENOMEM; 320 return PTR_ERR(setup->rcl);
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, 321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list); 322 &exec->unref_list);
323 323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If 324 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has 325 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no 326 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
349 rcl_u32(setup, 0); /* no address, since we're in None mode */ 340 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 } 341 }
351 342
343 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
344 rcl_u32(setup,
345 (setup->color_write ? (setup->color_write->paddr +
346 args->color_write.offset) :
347 0));
348 rcl_u16(setup, args->width);
349 rcl_u16(setup, args->height);
350 rcl_u16(setup, args->color_write.bits);
351
352 for (y = min_y_tile; y <= max_y_tile; y++) { 352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) { 353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile); 354 bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71db978..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "linux/component.h" 19#include "linux/component.h"
20#include "linux/pm_runtime.h"
20#include "vc4_drv.h" 21#include "vc4_drv.h"
21#include "vc4_regs.h" 22#include "vc4_regs.h"
22 23
@@ -144,18 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
144} 145}
145#endif /* CONFIG_DEBUG_FS */ 146#endif /* CONFIG_DEBUG_FS */
146 147
147int
148vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
149{
150 /* XXX: This interface is needed for GPU reset, and the way to
151 * do it is to turn our power domain off and back on. We
152 * can't just reset from within the driver, because the reset
153 * bits are in the power domain's register area, and get set
154 * during the poweron process.
155 */
156 return 0;
157}
158
159static void vc4_v3d_init_hw(struct drm_device *dev) 148static void vc4_v3d_init_hw(struct drm_device *dev)
160{ 149{
161 struct vc4_dev *vc4 = to_vc4_dev(dev); 150 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
167 V3D_WRITE(V3D_VPMBASE, 0); 156 V3D_WRITE(V3D_VPMBASE, 0);
168} 157}
169 158
159#ifdef CONFIG_PM
160static int vc4_v3d_runtime_suspend(struct device *dev)
161{
162 struct vc4_v3d *v3d = dev_get_drvdata(dev);
163 struct vc4_dev *vc4 = v3d->vc4;
164
165 vc4_irq_uninstall(vc4->dev);
166
167 return 0;
168}
169
170static int vc4_v3d_runtime_resume(struct device *dev)
171{
172 struct vc4_v3d *v3d = dev_get_drvdata(dev);
173 struct vc4_dev *vc4 = v3d->vc4;
174
175 vc4_v3d_init_hw(vc4->dev);
176 vc4_irq_postinstall(vc4->dev);
177
178 return 0;
179}
180#endif
181
170static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) 182static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
171{ 183{
172 struct platform_device *pdev = to_platform_device(dev); 184 struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
179 if (!v3d) 191 if (!v3d)
180 return -ENOMEM; 192 return -ENOMEM;
181 193
194 dev_set_drvdata(dev, v3d);
195
182 v3d->pdev = pdev; 196 v3d->pdev = pdev;
183 197
184 v3d->regs = vc4_ioremap_regs(pdev, 0); 198 v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
186 return PTR_ERR(v3d->regs); 200 return PTR_ERR(v3d->regs);
187 201
188 vc4->v3d = v3d; 202 vc4->v3d = v3d;
203 v3d->vc4 = vc4;
189 204
190 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { 205 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
191 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", 206 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
207 return ret; 222 return ret;
208 } 223 }
209 224
225 pm_runtime_enable(dev);
226
210 return 0; 227 return 0;
211} 228}
212 229
@@ -216,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
216 struct drm_device *drm = dev_get_drvdata(master); 233 struct drm_device *drm = dev_get_drvdata(master);
217 struct vc4_dev *vc4 = to_vc4_dev(drm); 234 struct vc4_dev *vc4 = to_vc4_dev(drm);
218 235
236 pm_runtime_disable(dev);
237
219 drm_irq_uninstall(drm); 238 drm_irq_uninstall(drm);
220 239
221 /* Disable the binner's overflow memory address, so the next 240 /* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
228 vc4->v3d = NULL; 247 vc4->v3d = NULL;
229} 248}
230 249
250static const struct dev_pm_ops vc4_v3d_pm_ops = {
251 SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
252};
253
231static const struct component_ops vc4_v3d_ops = { 254static const struct component_ops vc4_v3d_ops = {
232 .bind = vc4_v3d_bind, 255 .bind = vc4_v3d_bind,
233 .unbind = vc4_v3d_unbind, 256 .unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
255 .driver = { 278 .driver = {
256 .name = "vc4_v3d", 279 .name = "vc4_v3d",
257 .of_match_table = vc4_v3d_dt_match, 280 .of_match_table = vc4_v3d_dt_match,
281 .pm = &vc4_v3d_pm_ops,
258 }, 282 },
259}; 283};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, 401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true); 402 true);
403 exec->tile_bo = &tile_bo->base; 403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo) 404 if (IS_ERR(exec->tile_bo))
405 return -ENOMEM; 405 return PTR_ERR(exec->tile_bo);
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list); 406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 407
408 /* tile alloc address. */ 408 /* tile alloc address. */
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index da462afcb225..dd2dbb9746ce 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -18,6 +18,7 @@
18#include <linux/host1x.h> 18#include <linux/host1x.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_device.h>
21 22
22#include "bus.h" 23#include "bus.h"
23#include "dev.h" 24#include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
394 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 395 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
395 device->dev.dma_mask = &device->dev.coherent_dma_mask; 396 device->dev.dma_mask = &device->dev.coherent_dma_mask;
396 dev_set_name(&device->dev, "%s", driver->driver.name); 397 dev_set_name(&device->dev, "%s", driver->driver.name);
398 of_dma_configure(&device->dev, host1x->dev->of_node);
397 device->dev.release = host1x_device_release; 399 device->dev.release = host1x_device_release;
398 device->dev.bus = &host1x_bus_type; 400 device->dev.bus = &host1x_bus_type;
399 device->dev.parent = host1x->dev; 401 device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 314bf3718cc7..ff348690df94 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -23,6 +23,7 @@
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/dma-mapping.h>
26 27
27#define CREATE_TRACE_POINTS 28#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h> 29#include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
68 .nb_bases = 8, 69 .nb_bases = 8,
69 .init = host1x01_init, 70 .init = host1x01_init,
70 .sync_offset = 0x3000, 71 .sync_offset = 0x3000,
72 .dma_mask = DMA_BIT_MASK(32),
71}; 73};
72 74
73static const struct host1x_info host1x02_info = { 75static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
77 .nb_bases = 12, 79 .nb_bases = 12,
78 .init = host1x02_init, 80 .init = host1x02_init,
79 .sync_offset = 0x3000, 81 .sync_offset = 0x3000,
82 .dma_mask = DMA_BIT_MASK(32),
80}; 83};
81 84
82static const struct host1x_info host1x04_info = { 85static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
86 .nb_bases = 64, 89 .nb_bases = 64,
87 .init = host1x04_init, 90 .init = host1x04_init,
88 .sync_offset = 0x2100, 91 .sync_offset = 0x2100,
92 .dma_mask = DMA_BIT_MASK(34),
89}; 93};
90 94
91static const struct host1x_info host1x05_info = { 95static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
95 .nb_bases = 64, 99 .nb_bases = 64,
96 .init = host1x05_init, 100 .init = host1x05_init,
97 .sync_offset = 0x2100, 101 .sync_offset = 0x2100,
102 .dma_mask = DMA_BIT_MASK(34),
98}; 103};
99 104
100static struct of_device_id host1x_of_match[] = { 105static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
148 if (IS_ERR(host->regs)) 153 if (IS_ERR(host->regs))
149 return PTR_ERR(host->regs); 154 return PTR_ERR(host->regs);
150 155
156 dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
157
151 if (host->info->init) { 158 if (host->info->init) {
152 err = host->info->init(host); 159 err = host->info->init(host);
153 if (err) 160 if (err)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 0b6e8e9629c5..dace124994bb 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -96,6 +96,7 @@ struct host1x_info {
96 int nb_mlocks; /* host1x: number of mlocks */ 96 int nb_mlocks; /* host1x: number of mlocks */
97 int (*init)(struct host1x *); /* initialize per SoC ops */ 97 int (*init)(struct host1x *); /* initialize per SoC ops */
98 int sync_offset; 98 int sync_offset;
99 u64 dma_mask; /* mask of addressable memory */
99}; 100};
100 101
101struct host1x { 102struct host1x {
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b8380481..2b3105c8aed3 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
126 struct ads1015_data *data = i2c_get_clientdata(client); 126 struct ads1015_data *data = i2c_get_clientdata(client);
127 unsigned int pga = data->channel_data[channel].pga; 127 unsigned int pga = data->channel_data[channel].pga;
128 int fullscale = fullscale_table[pga]; 128 int fullscale = fullscale_table[pga];
129 const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; 129 const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
130 130
131 return DIV_ROUND_CLOSEST(reg * fullscale, mask); 131 return DIV_ROUND_CLOSEST(reg * fullscale, mask);
132} 132}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3deeb18a..685568b1236d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
406 unsigned long *state) 406 unsigned long *state)
407{ 407{
408 struct gpio_fan_data *fan_data = cdev->devdata; 408 struct gpio_fan_data *fan_data = cdev->devdata;
409 int r;
410 409
411 if (!fan_data) 410 if (!fan_data)
412 return -EINVAL; 411 return -EINVAL;
413 412
414 r = get_fan_speed_index(fan_data); 413 *state = fan_data->speed_index;
415 if (r < 0)
416 return r;
417
418 *state = r;
419 return 0; 414 return 0;
420} 415}
421 416
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3711df1d4526..4a45408dd820 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
586 if (!dev) 586 if (!dev)
587 return -ENOMEM; 587 return -ENOMEM;
588 588
589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *), 589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
590 GFP_KERNEL);
591 if (!dev->bsc_regmap) 590 if (!dev->bsc_regmap)
592 return -ENOMEM; 591 return -ENOMEM;
593 592
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d69799a9c..27fa0cb09538 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1271 switch (dev->device) { 1271 switch (dev->device) {
1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: 1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: 1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1275 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1276 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1275 priv->features |= FEATURE_I2C_BLOCK_READ; 1277 priv->features |= FEATURE_I2C_BLOCK_READ;
1276 priv->features |= FEATURE_IRQ; 1278 priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 08d26ba61ed3..13c45296ce5b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1450,7 +1450,8 @@ omap_i2c_probe(struct platform_device *pdev)
1450 1450
1451err_unuse_clocks: 1451err_unuse_clocks:
1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1453 pm_runtime_put(omap->dev); 1453 pm_runtime_dont_use_autosuspend(omap->dev);
1454 pm_runtime_put_sync(omap->dev);
1454 pm_runtime_disable(&pdev->dev); 1455 pm_runtime_disable(&pdev->dev);
1455err_free_mem: 1456err_free_mem:
1456 1457
@@ -1468,6 +1469,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1468 return ret; 1469 return ret;
1469 1470
1470 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1471 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1472 pm_runtime_dont_use_autosuspend(&pdev->dev);
1471 pm_runtime_put_sync(&pdev->dev); 1473 pm_runtime_put_sync(&pdev->dev);
1472 pm_runtime_disable(&pdev->dev); 1474 pm_runtime_disable(&pdev->dev);
1473 return 0; 1475 return 0;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index f3e5ff8522f0..213ba55e17c3 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -467,7 +467,7 @@ static int uniphier_fi2c_clk_init(struct device *dev,
467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED; 467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
468 468
469 if (!bus_speed) { 469 if (!bus_speed) {
470 dev_err(dev, "clock-freqyency should not be zero\n"); 470 dev_err(dev, "clock-frequency should not be zero\n");
471 return -EINVAL; 471 return -EINVAL;
472 } 472 }
473 473
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 1f4f3f53819c..89eaa8a7e1e0 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -328,7 +328,7 @@ static int uniphier_i2c_clk_init(struct device *dev,
328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED; 328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
329 329
330 if (!bus_speed) { 330 if (!bus_speed) {
331 dev_err(dev, "clock-freqyency should not be zero\n"); 331 dev_err(dev, "clock-frequency should not be zero\n");
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 334
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..94b80a51ab68 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
358 ret = device->query_device(device, &device->attrs, &uhw); 358 ret = device->query_device(device, &device->attrs, &uhw);
359 if (ret) { 359 if (ret) {
360 printk(KERN_WARNING "Couldn't query the device attributes\n"); 360 printk(KERN_WARNING "Couldn't query the device attributes\n");
361 ib_cache_cleanup_one(device);
361 goto out; 362 goto out;
362 } 363 }
363 364
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..1e37f3515d98 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
1071 } 1071 }
1072 } 1072 }
1073 1073
1074 if (rec->hop_limit > 1 || use_roce) { 1074 if (rec->hop_limit > 0 || use_roce) {
1075 ah_attr->ah_flags = IB_AH_GRH; 1075 ah_attr->ah_flags = IB_AH_GRH;
1076 ah_attr->grh.dgid = rec->dgid; 1076 ah_attr->grh.dgid = rec->dgid;
1077 1077
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1970 resp_size); 1970 resp_size);
1971 INIT_UDATA(&uhw, buf + sizeof(cmd), 1971 INIT_UDATA(&uhw, buf + sizeof(cmd),
1972 (unsigned long)cmd.response + resp_size, 1972 (unsigned long)cmd.response + resp_size,
1973 in_len - sizeof(cmd), out_len - resp_size); 1973 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1974 out_len - resp_size);
1974 1975
1975 memset(&cmd_ex, 0, sizeof(cmd_ex)); 1976 memset(&cmd_ex, 0, sizeof(cmd_ex));
1976 cmd_ex.user_handle = cmd.user_handle; 1977 cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3413 3414
3414 INIT_UDATA(&udata, buf + sizeof cmd, 3415 INIT_UDATA(&udata, buf + sizeof cmd,
3415 (unsigned long) cmd.response + sizeof resp, 3416 (unsigned long) cmd.response + sizeof resp,
3416 in_len - sizeof cmd, out_len - sizeof resp); 3417 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3418 out_len - sizeof resp);
3417 3419
3418 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3420 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3419 if (ret) 3421 if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3439 3441
3440 INIT_UDATA(&udata, buf + sizeof cmd, 3442 INIT_UDATA(&udata, buf + sizeof cmd,
3441 (unsigned long) cmd.response + sizeof resp, 3443 (unsigned long) cmd.response + sizeof resp,
3442 in_len - sizeof cmd, out_len - sizeof resp); 3444 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3445 out_len - sizeof resp);
3443 3446
3444 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3447 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3445 if (ret) 3448 if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bfa639b..d68f506c1922 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,48 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
818} 818}
819 819
820static void edit_counter(struct mlx4_counter *cnt, 820static void edit_counter(struct mlx4_counter *cnt, void *counters,
821 struct ib_pma_portcounters *pma_cnt) 821 __be16 attr_id)
822{ 822{
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, 823 switch (attr_id) {
824 (be64_to_cpu(cnt->tx_bytes) >> 2)); 824 case IB_PMA_PORT_COUNTERS:
825 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, 825 {
826 (be64_to_cpu(cnt->rx_bytes) >> 2)); 826 struct ib_pma_portcounters *pma_cnt =
827 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, 827 (struct ib_pma_portcounters *)counters;
828 be64_to_cpu(cnt->tx_frames)); 828
829 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, 829 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
830 be64_to_cpu(cnt->rx_frames)); 830 (be64_to_cpu(cnt->tx_bytes) >> 2));
831 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
832 (be64_to_cpu(cnt->rx_bytes) >> 2));
833 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
834 be64_to_cpu(cnt->tx_frames));
835 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
836 be64_to_cpu(cnt->rx_frames));
837 break;
838 }
839 case IB_PMA_PORT_COUNTERS_EXT:
840 {
841 struct ib_pma_portcounters_ext *pma_cnt_ext =
842 (struct ib_pma_portcounters_ext *)counters;
843
844 pma_cnt_ext->port_xmit_data =
845 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
846 pma_cnt_ext->port_rcv_data =
847 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
848 pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
849 pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
850 break;
851 }
852 }
853}
854
855static int iboe_process_mad_port_info(void *out_mad)
856{
857 struct ib_class_port_info cpi = {};
858
859 cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
860 memcpy(out_mad, &cpi, sizeof(cpi));
861 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
831} 862}
832 863
833static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 864static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
842 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) 873 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
843 return -EINVAL; 874 return -EINVAL;
844 875
876 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
877 return iboe_process_mad_port_info((void *)(out_mad->data + 40));
878
845 memset(&counter_stats, 0, sizeof(counter_stats)); 879 memset(&counter_stats, 0, sizeof(counter_stats));
846 mutex_lock(&dev->counters_table[port_num - 1].mutex); 880 mutex_lock(&dev->counters_table[port_num - 1].mutex);
847 list_for_each_entry(tmp_counter, 881 list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
863 switch (counter_stats.counter_mode & 0xf) { 897 switch (counter_stats.counter_mode & 0xf) {
864 case 0: 898 case 0:
865 edit_counter(&counter_stats, 899 edit_counter(&counter_stats,
866 (void *)(out_mad->data + 40)); 900 (void *)(out_mad->data + 40),
901 in_mad->mad_hdr.attr_id);
867 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 902 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
868 break; 903 break;
869 default: 904 default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
894 */ 929 */
895 if (link == IB_LINK_LAYER_INFINIBAND) { 930 if (link == IB_LINK_LAYER_INFINIBAND) {
896 if (mlx4_is_slave(dev->dev) && 931 if (mlx4_is_slave(dev->dev) &&
897 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 932 (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
898 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) 933 (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
934 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
935 in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
899 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 936 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
900 in_grh, in_mad, out_mad); 937 in_grh, in_mad, out_mad);
901 938
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f00b6c..fd97534762b8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1681 } 1681 }
1682 1682
1683 if (qp->ibqp.uobject) 1683 if (qp->ibqp.uobject)
1684 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); 1684 context->usr_page = cpu_to_be32(
1685 mlx4_to_hw_uar_index(dev->dev,
1686 to_mucontext(ibqp->uobject->context)->uar.index));
1685 else 1687 else
1686 context->usr_page = cpu_to_be32(dev->priv_uar.index); 1688 context->usr_page = cpu_to_be32(
1689 mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1687 1690
1688 if (attr_mask & IB_QP_DEST_QPN) 1691 if (attr_mask & IB_QP_DEST_QPN)
1689 context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 1692 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
75 75
76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, 76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
77 struct mlx5_create_srq_mbox_in **in, 77 struct mlx5_create_srq_mbox_in **in,
78 struct ib_udata *udata, int buf_size, int *inlen) 78 struct ib_udata *udata, int buf_size, int *inlen,
79 int is_xrc)
79{ 80{
80 struct mlx5_ib_dev *dev = to_mdev(pd->device); 81 struct mlx5_ib_dev *dev = to_mdev(pd->device);
81 struct mlx5_ib_create_srq ucmd = {}; 82 struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
87 int ncont; 88 int ncont;
88 u32 offset; 89 u32 offset;
89 u32 uidx = MLX5_IB_DEFAULT_UIDX; 90 u32 uidx = MLX5_IB_DEFAULT_UIDX;
90 int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
91 91
92 if (drv_data < 0) 92 ucmdlen = min(udata->inlen, sizeof(ucmd));
93 return -EINVAL;
94
95 ucmdlen = (drv_data < sizeof(ucmd)) ?
96 drv_data : sizeof(ucmd);
97 93
98 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { 94 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
99 mlx5_ib_dbg(dev, "failed copy udata\n"); 95 mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
103 if (ucmd.reserved0 || ucmd.reserved1) 99 if (ucmd.reserved0 || ucmd.reserved1)
104 return -EINVAL; 100 return -EINVAL;
105 101
106 if (drv_data > sizeof(ucmd) && 102 if (udata->inlen > sizeof(ucmd) &&
107 !ib_is_udata_cleared(udata, sizeof(ucmd), 103 !ib_is_udata_cleared(udata, sizeof(ucmd),
108 drv_data - sizeof(ucmd))) 104 udata->inlen - sizeof(ucmd)))
109 return -EINVAL; 105 return -EINVAL;
110 106
111 err = get_srq_user_index(to_mucontext(pd->uobject->context), 107 if (is_xrc) {
112 &ucmd, udata->inlen, &uidx); 108 err = get_srq_user_index(to_mucontext(pd->uobject->context),
113 if (err) 109 &ucmd, udata->inlen, &uidx);
114 return err; 110 if (err)
111 return err;
112 }
115 113
116 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); 114 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
117 115
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
151 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 149 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
152 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); 150 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
153 151
154 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 152 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
153 is_xrc){
155 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 154 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
156 xrc_srq_context_entry); 155 xrc_srq_context_entry);
157 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); 156 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
170 169
171static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, 170static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
172 struct mlx5_create_srq_mbox_in **in, int buf_size, 171 struct mlx5_create_srq_mbox_in **in, int buf_size,
173 int *inlen) 172 int *inlen, int is_xrc)
174{ 173{
175 int err; 174 int err;
176 int i; 175 int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
224 223
225 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 224 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
226 225
227 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 226 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
227 is_xrc){
228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
229 xrc_srq_context_entry); 229 xrc_srq_context_entry);
230 /* 0xffffff means we ask to work with cqe version 0 */ 230 /* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
303 srq->msrq.max_avail_gather); 303 srq->msrq.max_avail_gather);
304 304
305 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
306
305 if (pd->uobject) 307 if (pd->uobject)
306 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); 308 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
309 is_xrc);
307 else 310 else
308 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); 311 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
312 is_xrc);
309 313
310 if (err) { 314 if (err) {
311 mlx5_ib_warn(dev, "create srq %s failed, err %d\n", 315 mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
313 goto err_srq; 317 goto err_srq;
314 } 318 }
315 319
316 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
317 in->ctx.state_log_sz = ilog2(srq->msrq.max); 320 in->ctx.state_log_sz = ilog2(srq->msrq.max);
318 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; 321 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
319 xrcdn = 0; 322 xrcdn = 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b5cb15..12503f15fbd6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@ struct ocrdma_cq {
323 */ 323 */
324 u32 max_hw_cqe; 324 u32 max_hw_cqe;
325 bool phase_change; 325 bool phase_change;
326 bool deferred_arm, deferred_sol;
327 bool first_arm;
328
329 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization 326 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
330 * to cq polling 327 * to cq polling
331 */ 328 */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 37620b4baafb..12420e4ecf3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1094 spin_lock_init(&cq->comp_handler_lock); 1094 spin_lock_init(&cq->comp_handler_lock);
1095 INIT_LIST_HEAD(&cq->sq_head); 1095 INIT_LIST_HEAD(&cq->sq_head);
1096 INIT_LIST_HEAD(&cq->rq_head); 1096 INIT_LIST_HEAD(&cq->rq_head);
1097 cq->first_arm = true;
1098 1097
1099 if (ib_ctx) { 1098 if (ib_ctx) {
1100 uctx = get_ocrdma_ucontext(ib_ctx); 1099 uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2910,12 +2909,9 @@ expand_cqe:
2910 } 2909 }
2911stop_cqe: 2910stop_cqe:
2912 cq->getp = cur_getp; 2911 cq->getp = cur_getp;
2913 if (cq->deferred_arm || polled_hw_cqes) { 2912
2914 ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm, 2913 if (polled_hw_cqes)
2915 cq->deferred_sol, polled_hw_cqes); 2914 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2916 cq->deferred_arm = false;
2917 cq->deferred_sol = false;
2918 }
2919 2915
2920 return i; 2916 return i;
2921} 2917}
@@ -2999,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2999 if (cq_flags & IB_CQ_SOLICITED) 2995 if (cq_flags & IB_CQ_SOLICITED)
3000 sol_needed = true; 2996 sol_needed = true;
3001 2997
3002 if (cq->first_arm) { 2998 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3003 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3004 cq->first_arm = false;
3005 }
3006
3007 cq->deferred_arm = true;
3008 cq->deferred_sol = sol_needed;
3009 spin_unlock_irqrestore(&cq->cq_lock, flags); 2999 spin_unlock_irqrestore(&cq->cq_lock, flags);
3010 3000
3011 return 0; 3001 return 0;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e5e223938eec..374c129219ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
114 114
115static void update_domain(struct protection_domain *domain); 115static void update_domain(struct protection_domain *domain);
116static int protection_domain_init(struct protection_domain *domain); 116static int protection_domain_init(struct protection_domain *domain);
117static void detach_device(struct device *dev);
117 118
118/* 119/*
119 * For dynamic growth the aperture size is split into ranges of 128MB of 120 * For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
384 if (!dev_data) 385 if (!dev_data)
385 return; 386 return;
386 387
388 if (dev_data->domain)
389 detach_device(dev);
390
387 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, 391 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
388 dev); 392 dev);
389 393
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
228static int __init iommu_go_to_state(enum iommu_init_state state); 228static int __init iommu_go_to_state(enum iommu_init_state state);
229static void init_device_table_dma(void); 229static void init_device_table_dma(void);
230 230
231static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
232 u8 bank, u8 cntr, u8 fxn,
233 u64 *value, bool is_write);
234
231static inline void update_last_devid(u16 devid) 235static inline void update_last_devid(u16 devid)
232{ 236{
233 if (devid > amd_iommu_last_bdf) 237 if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1016} 1020}
1017 1021
1018/* 1022/*
1023 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1024 * Workaround:
1025 * BIOS should enable ATS write permission check by setting
1026 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1027 */
1028static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1029{
1030 u32 value;
1031
1032 if ((boot_cpu_data.x86 != 0x15) ||
1033 (boot_cpu_data.x86_model < 0x30) ||
1034 (boot_cpu_data.x86_model > 0x3f))
1035 return;
1036
1037 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1038 value = iommu_read_l2(iommu, 0x47);
1039
1040 if (value & BIT(0))
1041 return;
1042
1043 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1044 iommu_write_l2(iommu, 0x47, value | BIT(0));
1045
1046 pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1047 dev_name(&iommu->dev->dev));
1048}
1049
1050/*
1019 * This function clues the initialization function for one IOMMU 1051 * This function clues the initialization function for one IOMMU
1020 * together and also allocates the command buffer and programs the 1052 * together and also allocates the command buffer and programs the
1021 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1053 * hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1142 amd_iommu_pc_present = true; 1174 amd_iommu_pc_present = true;
1143 1175
1144 /* Check if the performance counters can be written to */ 1176 /* Check if the performance counters can be written to */
1145 if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || 1177 if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
1146 (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || 1178 (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
1147 (val != val2)) { 1179 (val != val2)) {
1148 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); 1180 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1149 amd_iommu_pc_present = false; 1181 amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1284 } 1316 }
1285 1317
1286 amd_iommu_erratum_746_workaround(iommu); 1318 amd_iommu_erratum_746_workaround(iommu);
1319 amd_iommu_ats_write_check_workaround(iommu);
1287 1320
1288 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, 1321 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
1289 amd_iommu_groups, "ivhd%d", 1322 amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
2283} 2316}
2284EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 2317EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2285 2318
2286int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, 2319static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2320 u8 bank, u8 cntr, u8 fxn,
2287 u64 *value, bool is_write) 2321 u64 *value, bool is_write)
2288{ 2322{
2289 struct amd_iommu *iommu;
2290 u32 offset; 2323 u32 offset;
2291 u32 max_offset_lim; 2324 u32 max_offset_lim;
2292 2325
2293 /* Make sure the IOMMU PC resource is available */
2294 if (!amd_iommu_pc_present)
2295 return -ENODEV;
2296
2297 /* Locate the iommu associated with the device ID */
2298 iommu = amd_iommu_rlookup_table[devid];
2299
2300 /* Check for valid iommu and pc register indexing */ 2326 /* Check for valid iommu and pc register indexing */
2301 if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) 2327 if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2302 return -ENODEV; 2328 return -ENODEV;
2303 2329
2304 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); 2330 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2322 return 0; 2348 return 0;
2323} 2349}
2324EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); 2350EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
2351
2352int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2353 u64 *value, bool is_write)
2354{
2355 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2356
2357 /* Make sure the IOMMU PC resource is available */
2358 if (!amd_iommu_pc_present || iommu == NULL)
2359 return -ENODEV;
2360
2361 return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
2362 value, is_write);
2363}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..8ffd7568fc91 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
329 /* Only care about add/remove events for physical functions */ 329 /* Only care about add/remove events for physical functions */
330 if (pdev->is_virtfn) 330 if (pdev->is_virtfn)
331 return NOTIFY_DONE; 331 return NOTIFY_DONE;
332 if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) 332 if (action != BUS_NOTIFY_ADD_DEVICE &&
333 action != BUS_NOTIFY_REMOVED_DEVICE)
333 return NOTIFY_DONE; 334 return NOTIFY_DONE;
334 335
335 info = dmar_alloc_pci_notify_info(pdev, action); 336 info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
339 down_write(&dmar_global_lock); 340 down_write(&dmar_global_lock);
340 if (action == BUS_NOTIFY_ADD_DEVICE) 341 if (action == BUS_NOTIFY_ADD_DEVICE)
341 dmar_pci_bus_add_dev(info); 342 dmar_pci_bus_add_dev(info);
342 else if (action == BUS_NOTIFY_DEL_DEVICE) 343 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
343 dmar_pci_bus_del_dev(info); 344 dmar_pci_bus_del_dev(info);
344 up_write(&dmar_global_lock); 345 up_write(&dmar_global_lock);
345 346
@@ -1353,7 +1354,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1353 1354
1354 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1355 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355 1356
1356 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1357 sts = readl(iommu->reg + DMAR_GSTS_REG);
1357 if (!(sts & DMA_GSTS_QIES)) 1358 if (!(sts & DMA_GSTS_QIES))
1358 goto end; 1359 goto end;
1359 1360
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 986a53e3eb96..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4367 rmrru->devices_cnt); 4367 rmrru->devices_cnt);
4368 if(ret < 0) 4368 if(ret < 0)
4369 return ret; 4369 return ret;
4370 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4370 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4371 dmar_remove_dev_scope(info, rmrr->segment, 4371 dmar_remove_dev_scope(info, rmrr->segment,
4372 rmrru->devices, rmrru->devices_cnt); 4372 rmrru->devices, rmrru->devices_cnt);
4373 } 4373 }
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4387 break; 4387 break;
4388 else if(ret < 0) 4388 else if(ret < 0)
4389 return ret; 4389 return ret;
4390 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4390 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4391 if (dmar_remove_dev_scope(info, atsr->segment, 4391 if (dmar_remove_dev_scope(info, atsr->segment,
4392 atsru->devices, atsru->devices_cnt)) 4392 atsru->devices, atsru->devices_cnt))
4393 break; 4393 break;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
250{ 250{
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
252 struct intel_svm_dev *sdev;
252 253
254 /* This might end up being called from exit_mmap(), *before* the page
255 * tables are cleared. And __mmu_notifier_release() will delete us from
256 * the list of notifiers so that our invalidate_range() callback doesn't
257 * get called when the page tables are cleared. So we need to protect
258 * against hardware accessing those page tables.
259 *
260 * We do it by clearing the entry in the PASID table and then flushing
261 * the IOTLB and the PASID table caches. This might upset hardware;
262 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
263 * page) so that we end up taking a fault that the hardware really
264 * *has* to handle gracefully without affecting other processes.
265 */
253 svm->iommu->pasid_table[svm->pasid].val = 0; 266 svm->iommu->pasid_table[svm->pasid].val = 0;
267 wmb();
268
269 rcu_read_lock();
270 list_for_each_entry_rcu(sdev, &svm->devs, list) {
271 intel_flush_pasid_dev(svm, sdev, svm->pasid);
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
273 }
274 rcu_read_unlock();
254 275
255 /* There's no need to do any flush because we can't get here if there
256 * are any devices left anyway. */
257 WARN_ON(!list_empty(&svm->devs));
258} 276}
259 277
260static const struct mmu_notifier_ops intel_mmuops = { 278static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
379 goto out; 397 goto out;
380 } 398 }
381 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; 399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
382 mm = NULL;
383 } else 400 } else
384 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); 401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
385 wmb(); 402 wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
442 kfree_rcu(sdev, rcu); 459 kfree_rcu(sdev, rcu);
443 460
444 if (list_empty(&svm->devs)) { 461 if (list_empty(&svm->devs)) {
445 mmu_notifier_unregister(&svm->notifier, svm->mm);
446 462
447 idr_remove(&svm->iommu->pasid_idr, svm->pasid); 463 idr_remove(&svm->iommu->pasid_idr, svm->pasid);
448 if (svm->mm) 464 if (svm->mm)
449 mmput(svm->mm); 465 mmu_notifier_unregister(&svm->notifier, svm->mm);
466
450 /* We mandate that no page faults may be outstanding 467 /* We mandate that no page faults may be outstanding
451 * for the PASID when intel_svm_unbind_mm() is called. 468 * for the PASID when intel_svm_unbind_mm() is called.
452 * If that is not obeyed, subtle errors will happen. 469 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
507 struct intel_svm *svm = NULL; 524 struct intel_svm *svm = NULL;
508 int head, tail, handled = 0; 525 int head, tail, handled = 0;
509 526
527 /* Clear PPR bit before reading head/tail registers, to
528 * ensure that we get a new interrupt if needed. */
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
530
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; 531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; 532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
512 while (head != tail) { 533 while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
551 * any faults on kernel addresses. */ 572 * any faults on kernel addresses. */
552 if (!svm->mm) 573 if (!svm->mm)
553 goto bad_req; 574 goto bad_req;
575 /* If the mm is already defunct, don't handle faults. */
576 if (!atomic_inc_not_zero(&svm->mm->mm_users))
577 goto bad_req;
554 down_read(&svm->mm->mmap_sem); 578 down_read(&svm->mm->mmap_sem);
555 vma = find_extend_vma(svm->mm, address); 579 vma = find_extend_vma(svm->mm, address);
556 if (!vma || address < vma->vm_start) 580 if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
567 result = QI_RESP_SUCCESS; 591 result = QI_RESP_SUCCESS;
568 invalid: 592 invalid:
569 up_read(&svm->mm->mmap_sem); 593 up_read(&svm->mm->mmap_sem);
594 mmput(svm->mm);
570 bad_req: 595 bad_req:
571 /* Accounting for major/minor faults? */ 596 /* Accounting for major/minor faults? */
572 rcu_read_lock(); 597 rcu_read_lock();
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
629 629
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); 630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
631 631
632 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 632 sts = readl(iommu->reg + DMAR_GSTS_REG);
633 if (!(sts & DMA_GSTS_IRES)) 633 if (!(sts & DMA_GSTS_IRES))
634 goto end; 634 goto end;
635 635
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0a73632b28d5..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -78,6 +78,9 @@ struct its_node {
78 78
79#define ITS_ITT_ALIGN SZ_256 79#define ITS_ITT_ALIGN SZ_256
80 80
81/* Convert page order to size in bytes */
82#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
83
81struct event_lpi_map { 84struct event_lpi_map {
82 unsigned long *lpi_map; 85 unsigned long *lpi_map;
83 u16 *col_map; 86 u16 *col_map;
@@ -600,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
600 lpi_set_config(d, true); 603 lpi_set_config(d, true);
601} 604}
602 605
603static void its_eoi_irq(struct irq_data *d)
604{
605 gic_write_eoir(d->hwirq);
606}
607
608static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 606static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
609 bool force) 607 bool force)
610{ 608{
@@ -641,7 +639,7 @@ static struct irq_chip its_irq_chip = {
641 .name = "ITS", 639 .name = "ITS",
642 .irq_mask = its_mask_irq, 640 .irq_mask = its_mask_irq,
643 .irq_unmask = its_unmask_irq, 641 .irq_unmask = its_unmask_irq,
644 .irq_eoi = its_eoi_irq, 642 .irq_eoi = irq_chip_eoi_parent,
645 .irq_set_affinity = its_set_affinity, 643 .irq_set_affinity = its_set_affinity,
646 .irq_compose_msi_msg = its_irq_compose_msi_msg, 644 .irq_compose_msi_msg = its_irq_compose_msi_msg,
647}; 645};
@@ -846,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
846 u64 type = GITS_BASER_TYPE(val); 844 u64 type = GITS_BASER_TYPE(val);
847 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 845 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
848 int order = get_order(psz); 846 int order = get_order(psz);
849 int alloc_size;
850 int alloc_pages; 847 int alloc_pages;
851 u64 tmp; 848 u64 tmp;
852 void *base; 849 void *base;
@@ -878,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
878 } 875 }
879 } 876 }
880 877
881 alloc_size = (1 << order) * PAGE_SIZE;
882retry_alloc_baser: 878retry_alloc_baser:
883 alloc_pages = (alloc_size / psz); 879 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
884 if (alloc_pages > GITS_BASER_PAGES_MAX) { 880 if (alloc_pages > GITS_BASER_PAGES_MAX) {
885 alloc_pages = GITS_BASER_PAGES_MAX; 881 alloc_pages = GITS_BASER_PAGES_MAX;
886 order = get_order(GITS_BASER_PAGES_MAX * psz); 882 order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -933,7 +929,7 @@ retry_baser:
933 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 929 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
934 if (!shr) { 930 if (!shr) {
935 cache = GITS_BASER_nC; 931 cache = GITS_BASER_nC;
936 __flush_dcache_area(base, alloc_size); 932 __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
937 } 933 }
938 goto retry_baser; 934 goto retry_baser;
939 } 935 }
@@ -966,7 +962,7 @@ retry_baser:
966 } 962 }
967 963
968 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 964 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
969 (int)(alloc_size / entry_size), 965 (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
970 its_base_type_string[type], 966 its_base_type_string[type],
971 (unsigned long)virt_to_phys(base), 967 (unsigned long)virt_to_phys(base),
972 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 968 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe0c8a4..d1f8ab915b15 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
373 373
374static void gigaset_device_release(struct device *dev) 374static void gigaset_device_release(struct device *dev)
375{ 375{
376 struct cardstate *cs = dev_get_drvdata(dev); 376 kfree(container_of(dev, struct ser_cardstate, dev.dev));
377
378 if (!cs)
379 return;
380 dev_set_drvdata(dev, NULL);
381 kfree(cs->hw.ser);
382 cs->hw.ser = NULL;
383} 377}
384 378
385/* 379/*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
408 cs->hw.ser = NULL; 402 cs->hw.ser = NULL;
409 return rc; 403 return rc;
410 } 404 }
411 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
412 405
413 tasklet_init(&cs->write_tasklet, 406 tasklet_init(&cs->write_tasklet,
414 gigaset_modem_fill, (unsigned long) cs); 407 gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e2944784e00..afde4edef9ae 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
392 } 392 }
393 stat = bchannel_get_rxbuf(&bc->bch, cnt); 393 stat = bchannel_get_rxbuf(&bc->bch, cnt);
394 /* only transparent use the count here, HDLC overun is detected later */ 394 /* only transparent use the count here, HDLC overun is detected later */
395 if (stat == ENOMEM) { 395 if (stat == -ENOMEM) {
396 pr_warning("%s.B%d: No memory for %d bytes\n", 396 pr_warning("%s.B%d: No memory for %d bytes\n",
397 card->name, bc->bch.nr, cnt); 397 card->name, bc->bch.nr, cnt);
398 return; 398 return;
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb91c5b..9f6acd5d1d2e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name,
572 } 572 }
573 } 573 }
574 574
575 ret = nvm_get_sysblock(dev, &dev->sb); 575 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
576 if (!ret) 576 ret = nvm_get_sysblock(dev, &dev->sb);
577 pr_err("nvm: device not initialized.\n"); 577 if (!ret)
578 else if (ret < 0) 578 pr_err("nvm: device not initialized.\n");
579 pr_err("nvm: err (%d) on device initialization\n", ret); 579 else if (ret < 0)
580 pr_err("nvm: err (%d) on device initialization\n", ret);
581 }
580 582
581 /* register device with a supported media manager */ 583 /* register device with a supported media manager */
582 down_write(&nvm_lock); 584 down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); 1057 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1; 1058 info.fs_ppa.ppa = -1;
1057 1059
1058 ret = nvm_init_sysblock(dev, &info); 1060 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1059 if (ret) 1061 ret = nvm_init_sysblock(dev, &info);
1060 return ret; 1062 if (ret)
1063 return ret;
1064 }
1061 1065
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); 1066 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063 1067
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1117 dev->mt = NULL; 1121 dev->mt = NULL;
1118 } 1122 }
1119 1123
1120 return nvm_dev_factory(dev, fact.flags); 1124 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1125 return nvm_dev_factory(dev, fact.flags);
1126
1127 return 0;
1121} 1128}
1122 1129
1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1130static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..307db1ea22de 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 } 300 }
301 301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) 303 if (!page) {
304 bio_put(bio);
304 return -ENOMEM; 305 return -ENOMEM;
306 }
305 307
306 while ((slot = find_first_zero_bit(rblk->invalid_pages, 308 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) { 309 nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7700c8..f7b37336353f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
174static inline int request_intersects(struct rrpc_inflight_rq *r, 174static inline int request_intersects(struct rrpc_inflight_rq *r,
175 sector_t laddr_start, sector_t laddr_end) 175 sector_t laddr_start, sector_t laddr_end)
176{ 176{
177 return (laddr_end >= r->l_start && laddr_end <= r->l_end) && 177 return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
178 (laddr_start >= r->l_start && laddr_start <= r->l_end);
179} 178}
180 179
181static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, 180static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
184 sector_t laddr_end = laddr + pages - 1; 183 sector_t laddr_end = laddr + pages - 1;
185 struct rrpc_inflight_rq *rtmp; 184 struct rrpc_inflight_rq *rtmp;
186 185
186 WARN_ON(irqs_disabled());
187
187 spin_lock_irq(&rrpc->inflights.lock); 188 spin_lock_irq(&rrpc->inflights.lock);
188 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { 189 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
189 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { 190 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
1191 1191
1192 if (clone) 1192 if (clone)
1193 free_rq_clone(clone); 1193 free_rq_clone(clone);
1194 else if (!tio->md->queue->mq_ops)
1195 free_rq_tio(tio);
1194} 1196}
1195 1197
1196/* 1198/*
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 7e9cbf757e95..fb7ed730d932 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
497 if (!client->dev.platform_data) { 497 if (!client->dev.platform_data) {
498 dev_err(&client->dev, 498 dev_err(&client->dev,
499 "Neither DT not platform data provided\n"); 499 "Neither DT not platform data provided\n");
500 return EINVAL; 500 return -EINVAL;
501 } 501 }
502 flash->platform_data = client->dev.platform_data; 502 flash->platform_data = client->dev.platform_data;
503 } 503 }
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f8dd7505b529..e1719ffdfb3d 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
1960 } 1960 }
1961 1961
1962 /* tx 5v detect */ 1962 /* tx 5v detect */
1963 tx_5v = io_read(sd, 0x70) & info->cable_det_mask; 1963 tx_5v = irq_reg_0x70 & info->cable_det_mask;
1964 if (tx_5v) { 1964 if (tx_5v) {
1965 v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v); 1965 v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
1966 io_write(sd, 0x71, tx_5v);
1967 adv76xx_s_detect_tx_5v_ctrl(sd); 1966 adv76xx_s_detect_tx_5v_ctrl(sd);
1968 if (handled) 1967 if (handled)
1969 *handled = true; 1968 *handled = true;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 8c54fd21022e..a13625722848 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
1843 ent->function = MEDIA_ENT_F_CONN_RF; 1843 ent->function = MEDIA_ENT_F_CONN_RF;
1844 break; 1844 break;
1845 default: /* AU0828_VMUX_DEBUG */ 1845 default: /* AU0828_VMUX_DEBUG */
1846 ent->function = MEDIA_ENT_F_CONN_TEST; 1846 continue;
1847 break;
1848 } 1847 }
1849 1848
1850 ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); 1849 ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4c1903f781fc..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
415 delta = mftb() - psl_tb; 415 delta = mftb() - psl_tb;
416 if (delta < 0) 416 if (delta < 0)
417 delta = -delta; 417 delta = -delta;
418 } while (cputime_to_usecs(delta) > 16); 418 } while (tb_to_ns(delta) > 16000);
419 419
420 return 0; 420 return 0;
421} 421}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
2232 dma_release_channel(host->tx_chan); 2232 dma_release_channel(host->tx_chan);
2233 if (host->rx_chan) 2233 if (host->rx_chan)
2234 dma_release_channel(host->rx_chan); 2234 dma_release_channel(host->rx_chan);
2235 pm_runtime_dont_use_autosuspend(host->dev);
2235 pm_runtime_put_sync(host->dev); 2236 pm_runtime_put_sync(host->dev);
2236 pm_runtime_disable(host->dev); 2237 pm_runtime_disable(host->dev);
2237 if (host->dbclk) 2238 if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2253 dma_release_channel(host->tx_chan); 2254 dma_release_channel(host->tx_chan);
2254 dma_release_channel(host->rx_chan); 2255 dma_release_channel(host->rx_chan);
2255 2256
2257 pm_runtime_dont_use_autosuspend(host->dev);
2256 pm_runtime_put_sync(host->dev); 2258 pm_runtime_put_sync(host->dev);
2257 pm_runtime_disable(host->dev); 2259 pm_runtime_disable(host->dev);
2258 device_init_wakeup(&pdev->dev, false); 2260 device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
193 vol->changing_leb = 1; 193 vol->changing_leb = 1;
194 vol->ch_lnum = req->lnum; 194 vol->ch_lnum = req->lnum;
195 195
196 vol->upd_buf = vmalloc(req->bytes); 196 vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
197 if (!vol->upd_buf) 197 if (!vol->upd_buf)
198 return -ENOMEM; 198 return -ENOMEM;
199 199
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b560558884..b7f1a9919033 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, 214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
215 struct rtnl_link_stats64 *stats); 215 struct rtnl_link_stats64 *stats);
216static void bond_slave_arr_handler(struct work_struct *work); 216static void bond_slave_arr_handler(struct work_struct *work);
217static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
218 int mod);
217 219
218/*---------------------------- General routines -----------------------------*/ 220/*---------------------------- General routines -----------------------------*/
219 221
@@ -2127,6 +2129,7 @@ static void bond_miimon_commit(struct bonding *bond)
2127 continue; 2129 continue;
2128 2130
2129 case BOND_LINK_UP: 2131 case BOND_LINK_UP:
2132 bond_update_speed_duplex(slave);
2130 bond_set_slave_link_state(slave, BOND_LINK_UP, 2133 bond_set_slave_link_state(slave, BOND_LINK_UP,
2131 BOND_SLAVE_NOTIFY_NOW); 2134 BOND_SLAVE_NOTIFY_NOW);
2132 slave->last_link_up = jiffies; 2135 slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2459 struct slave *slave) 2462 struct slave *slave)
2460{ 2463{
2461 struct arphdr *arp = (struct arphdr *)skb->data; 2464 struct arphdr *arp = (struct arphdr *)skb->data;
2462 struct slave *curr_active_slave; 2465 struct slave *curr_active_slave, *curr_arp_slave;
2463 unsigned char *arp_ptr; 2466 unsigned char *arp_ptr;
2464 __be32 sip, tip; 2467 __be32 sip, tip;
2465 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2468 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2506 &sip, &tip); 2509 &sip, &tip);
2507 2510
2508 curr_active_slave = rcu_dereference(bond->curr_active_slave); 2511 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2512 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2509 2513
2510 /* Backup slaves won't see the ARP reply, but do come through 2514 /* We 'trust' the received ARP enough to validate it if:
2511 * here for each ARP probe (so we swap the sip/tip to validate 2515 *
2512 * the probe). In a "redundant switch, common router" type of 2516 * (a) the slave receiving the ARP is active (which includes the
2513 * configuration, the ARP probe will (hopefully) travel from 2517 * current ARP slave, if any), or
2514 * the active, through one switch, the router, then the other 2518 *
2515 * switch before reaching the backup. 2519 * (b) the receiving slave isn't active, but there is a currently
2520 * active slave and it received valid arp reply(s) after it became
2521 * the currently active slave, or
2522 *
2523 * (c) there is an ARP slave that sent an ARP during the prior ARP
2524 * interval, and we receive an ARP reply on any slave. We accept
2525 * these because switch FDB update delays may deliver the ARP
2526 * reply to a slave other than the sender of the ARP request.
2516 * 2527 *
2517 * We 'trust' the arp requests if there is an active slave and 2528 * Note: for (b), backup slaves are receiving the broadcast ARP
2518 * it received valid arp reply(s) after it became active. This 2529 * request, not a reply. This request passes from the sending
2519 * is done to avoid endless looping when we can't reach the 2530 * slave through the L2 switch(es) to the receiving slave. Since
2531 * this is checking the request, sip/tip are swapped for
2532 * validation.
2533 *
2534 * This is done to avoid endless looping when we can't reach the
2520 * arp_ip_target and fool ourselves with our own arp requests. 2535 * arp_ip_target and fool ourselves with our own arp requests.
2521 */ 2536 */
2522
2523 if (bond_is_active_slave(slave)) 2537 if (bond_is_active_slave(slave))
2524 bond_validate_arp(bond, slave, sip, tip); 2538 bond_validate_arp(bond, slave, sip, tip);
2525 else if (curr_active_slave && 2539 else if (curr_active_slave &&
2526 time_after(slave_last_rx(bond, curr_active_slave), 2540 time_after(slave_last_rx(bond, curr_active_slave),
2527 curr_active_slave->last_link_up)) 2541 curr_active_slave->last_link_up))
2528 bond_validate_arp(bond, slave, tip, sip); 2542 bond_validate_arp(bond, slave, tip, sip);
2543 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2544 bond_time_in_interval(bond,
2545 dev_trans_start(curr_arp_slave->dev), 1))
2546 bond_validate_arp(bond, slave, sip, tip);
2529 2547
2530out_unlock: 2548out_unlock:
2531 if (arp != (struct arphdr *)skb->data) 2549 if (arp != (struct arphdr *)skb->data)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
117 */ 117 */
118#define EMS_USB_ARM7_CLOCK 8000000 118#define EMS_USB_ARM7_CLOCK 8000000
119 119
120#define CPC_TX_QUEUE_TRIGGER_LOW 25
121#define CPC_TX_QUEUE_TRIGGER_HIGH 35
122
120/* 123/*
121 * CAN-Message representation in a CPC_MSG. Message object type is 124 * CAN-Message representation in a CPC_MSG. Message object type is
122 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or 125 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
278 switch (urb->status) { 281 switch (urb->status) {
279 case 0: 282 case 0:
280 dev->free_slots = dev->intr_in_buffer[1]; 283 dev->free_slots = dev->intr_in_buffer[1];
284 if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
285 if (netif_queue_stopped(netdev)){
286 netif_wake_queue(netdev);
287 }
288 }
281 break; 289 break;
282 290
283 case -ECONNRESET: /* unlink */ 291 case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
526 /* Release context */ 534 /* Release context */
527 context->echo_index = MAX_TX_URBS; 535 context->echo_index = MAX_TX_URBS;
528 536
529 if (netif_queue_stopped(netdev))
530 netif_wake_queue(netdev);
531} 537}
532 538
533/* 539/*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
587 int err, i; 593 int err, i;
588 594
589 dev->intr_in_buffer[0] = 0; 595 dev->intr_in_buffer[0] = 0;
590 dev->free_slots = 15; /* initial size */ 596 dev->free_slots = 50; /* initial size */
591 597
592 for (i = 0; i < MAX_RX_URBS; i++) { 598 for (i = 0; i < MAX_RX_URBS; i++) {
593 struct urb *urb = NULL; 599 struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
835 841
836 /* Slow down tx path */ 842 /* Slow down tx path */
837 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || 843 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
838 dev->free_slots < 5) { 844 dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
839 netif_stop_queue(netdev); 845 netif_stop_queue(netdev);
840 } 846 }
841 } 847 }
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c54553418..a47f52f44b0d 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
25static const struct mv88e6xxx_switch_id mv88e6352_table[] = { 25static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" }, 26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, 27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
28 { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
28 { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, 29 { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
29 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" }, 30 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
30 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" }, 31 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf34681af4f6..512c8c0be1b4 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1555,7 +1555,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1555 1555
1556 if (vlan.vid != vid || !vlan.valid || 1556 if (vlan.vid != vid || !vlan.valid ||
1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1558 return -ENOENT; 1558 return -EOPNOTSUPP;
1559 1559
1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; 1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1561 1561
@@ -1582,6 +1582,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1582 const struct switchdev_obj_port_vlan *vlan) 1582 const struct switchdev_obj_port_vlan *vlan)
1583{ 1583{
1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1585 const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1585 u16 pvid, vid; 1586 u16 pvid, vid;
1586 int err = 0; 1587 int err = 0;
1587 1588
@@ -1597,7 +1598,8 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1597 goto unlock; 1598 goto unlock;
1598 1599
1599 if (vid == pvid) { 1600 if (vid == pvid) {
1600 err = _mv88e6xxx_port_pvid_set(ds, port, 0); 1601 /* restore reserved VLAN ID */
1602 err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
1601 if (err) 1603 if (err)
1602 goto unlock; 1604 goto unlock;
1603 } 1605 }
@@ -1889,26 +1891,20 @@ unlock:
1889 1891
1890int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) 1892int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
1891{ 1893{
1892 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1894 return 0;
1893 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1894 int err;
1895
1896 /* The port joined a bridge, so leave its reserved VLAN */
1897 mutex_lock(&ps->smi_mutex);
1898 err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
1899 if (!err)
1900 err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1901 mutex_unlock(&ps->smi_mutex);
1902 return err;
1903} 1895}
1904 1896
1905int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) 1897int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
1906{ 1898{
1899 return 0;
1900}
1901
1902static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
1903{
1907 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1908 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1905 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1909 int err; 1906 int err;
1910 1907
1911 /* The port left the bridge, so join its reserved VLAN */
1912 mutex_lock(&ps->smi_mutex); 1908 mutex_lock(&ps->smi_mutex);
1913 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); 1909 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
1914 if (!err) 1910 if (!err)
@@ -2192,8 +2188,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2192 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2188 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2193 continue; 2189 continue;
2194 2190
2195 /* setup the unbridged state */ 2191 ret = mv88e6xxx_setup_port_default_vlan(ds, i);
2196 ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
2197 if (ret < 0) 2192 if (ret < 0)
2198 return ret; 2193 return ret;
2199 } 2194 }
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289a26c0..2f79d29f17f2 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), 1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), 1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), 1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
1504 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
1504 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), 1505 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
1505 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), 1506 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
1506 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), 1507 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbea15bd..0907ab6ff309 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2380 sizeof(u32), 2380 sizeof(u32),
2381 &tx_ring->tx_status_pa, 2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL); 2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status_pa) { 2383 if (!tx_ring->tx_status) {
2384 dev_err(&adapter->pdev->dev, 2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n"); 2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM; 2386 return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b921dc..fcdf5dda448f 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@ static const char version[] =
50static void write_rreg(u_long base, u_int reg, u_int val) 50static void write_rreg(u_long base, u_int reg, u_int val)
51{ 51{
52 asm volatile( 52 asm volatile(
53 "str%?h %1, [%2] @ NET_RAP\n\t" 53 "strh %1, [%2] @ NET_RAP\n\t"
54 "str%?h %0, [%2, #-4] @ NET_RDP" 54 "strh %0, [%2, #-4] @ NET_RDP"
55 : 55 :
56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
57} 57}
@@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
60{ 60{
61 unsigned short v; 61 unsigned short v;
62 asm volatile( 62 asm volatile(
63 "str%?h %1, [%2] @ NET_RAP\n\t" 63 "strh %1, [%2] @ NET_RAP\n\t"
64 "ldr%?h %0, [%2, #-4] @ NET_RDP" 64 "ldrh %0, [%2, #-4] @ NET_RDP"
65 : "=r" (v) 65 : "=r" (v)
66 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 66 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
67 return v; 67 return v;
@@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70static inline void write_ireg(u_long base, u_int reg, u_int val) 70static inline void write_ireg(u_long base, u_int reg, u_int val)
71{ 71{
72 asm volatile( 72 asm volatile(
73 "str%?h %1, [%2] @ NET_RAP\n\t" 73 "strh %1, [%2] @ NET_RAP\n\t"
74 "str%?h %0, [%2, #8] @ NET_IDP" 74 "strh %0, [%2, #8] @ NET_IDP"
75 : 75 :
76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
77} 77}
@@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
80{ 80{
81 u_short v; 81 u_short v;
82 asm volatile( 82 asm volatile(
83 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "strh %1, [%2] @ NAT_RAP\n\t"
84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldrh %0, [%2, #8] @ NET_IDP\n\t"
85 : "=r" (v) 85 : "=r" (v)
86 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
87 return v; 87 return v;
@@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
96 offset = ISAMEM_BASE + (offset << 1); 96 offset = ISAMEM_BASE + (offset << 1);
97 length = (length + 1) & ~1; 97 length = (length + 1) & ~1;
98 if ((int)buf & 2) { 98 if ((int)buf & 2) {
99 asm volatile("str%?h %2, [%0], #4" 99 asm volatile("strh %2, [%0], #4"
100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
101 buf += 2; 101 buf += 2;
102 length -= 2; 102 length -= 2;
@@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
104 while (length > 8) { 104 while (length > 8) {
105 register unsigned int tmp asm("r2"), tmp2 asm("r3"); 105 register unsigned int tmp asm("r2"), tmp2 asm("r3");
106 asm volatile( 106 asm volatile(
107 "ldm%?ia %0!, {%1, %2}" 107 "ldmia %0!, {%1, %2}"
108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); 108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
109 length -= 8; 109 length -= 8;
110 asm volatile( 110 asm volatile(
111 "str%?h %1, [%0], #4\n\t" 111 "strh %1, [%0], #4\n\t"
112 "mov%? %1, %1, lsr #16\n\t" 112 "mov %1, %1, lsr #16\n\t"
113 "str%?h %1, [%0], #4\n\t" 113 "strh %1, [%0], #4\n\t"
114 "str%?h %2, [%0], #4\n\t" 114 "strh %2, [%0], #4\n\t"
115 "mov%? %2, %2, lsr #16\n\t" 115 "mov %2, %2, lsr #16\n\t"
116 "str%?h %2, [%0], #4" 116 "strh %2, [%0], #4"
117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); 117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
118 } 118 }
119 while (length > 0) { 119 while (length > 0) {
120 asm volatile("str%?h %2, [%0], #4" 120 asm volatile("strh %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 122 buf += 2;
123 length -= 2; 123 length -= 2;
@@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
132 if ((int)buf & 2) { 132 if ((int)buf & 2) {
133 unsigned int tmp; 133 unsigned int tmp;
134 asm volatile( 134 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 135 "ldrh %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 136 "strb %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 137 "mov %2, %2, lsr #8\n\t"
138 "str%?b %2, [%1], #1" 138 "strb %2, [%1], #1"
139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); 139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
140 length -= 2; 140 length -= 2;
141 } 141 }
142 while (length > 8) { 142 while (length > 8) {
143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; 143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 asm volatile( 144 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 145 "ldrh %2, [%0], #4\n\t"
146 "ldr%?h %4, [%0], #4\n\t" 146 "ldrh %4, [%0], #4\n\t"
147 "ldr%?h %3, [%0], #4\n\t" 147 "ldrh %3, [%0], #4\n\t"
148 "orr%? %2, %2, %4, lsl #16\n\t" 148 "orr %2, %2, %4, lsl #16\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 149 "ldrh %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 150 "orr %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 151 "stmia %1!, {%2, %3}"
152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) 152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
153 : "0" (offset), "1" (buf)); 153 : "0" (offset), "1" (buf));
154 length -= 8; 154 length -= 8;
@@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
156 while (length > 0) { 156 while (length > 0) {
157 unsigned int tmp; 157 unsigned int tmp;
158 asm volatile( 158 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 159 "ldrh %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 160 "strb %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 161 "mov %2, %2, lsr #8\n\t"
162 "str%?b %2, [%1], #1" 162 "strb %2, [%1], #1"
163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); 163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
164 length -= 2; 164 length -= 2;
165 } 165 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590f6bb1..3a7ebfdda57d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */ 547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
548 548
549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); 549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
550 if(lp==NULL) 550 if (!lp)
551 return -ENODEV; 551 return -ENOMEM;
552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
553 dev->ml_priv = lp; 553 dev->ml_priv = lp;
554 lp->name = chipname; 554 lp->name = chipname;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..6446af1403f7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
163 struct sk_buff *skb = tx_buff->skb; 163 struct sk_buff *skb = tx_buff->skb;
164 unsigned int info = le32_to_cpu(txbd->info); 164 unsigned int info = le32_to_cpu(txbd->info);
165 165
166 if ((info & FOR_EMAC) || !txbd->data) 166 if ((info & FOR_EMAC) || !txbd->data || !skb)
167 break; 167 break;
168 168
169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { 169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
191 191
192 txbd->data = 0; 192 txbd->data = 0;
193 txbd->info = 0; 193 txbd->info = 0;
194 tx_buff->skb = NULL;
194 195
195 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; 196 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
196 } 197 }
@@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev)
446 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 447 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
447 } 448 }
448 449
450 priv->txbd_curr = 0;
451 priv->txbd_dirty = 0;
452
449 /* Clean Tx BD's */ 453 /* Clean Tx BD's */
450 memset(priv->txbd, 0, TX_RING_SZ); 454 memset(priv->txbd, 0, TX_RING_SZ);
451 455
@@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
514} 518}
515 519
516/** 520/**
521 * arc_free_tx_queue - free skb from tx queue
522 * @ndev: Pointer to the network device.
523 *
524 * This function must be called while EMAC disable
525 */
526static void arc_free_tx_queue(struct net_device *ndev)
527{
528 struct arc_emac_priv *priv = netdev_priv(ndev);
529 unsigned int i;
530
531 for (i = 0; i < TX_BD_NUM; i++) {
532 struct arc_emac_bd *txbd = &priv->txbd[i];
533 struct buffer_state *tx_buff = &priv->tx_buff[i];
534
535 if (tx_buff->skb) {
536 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
537 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
538
539 /* return the sk_buff to system */
540 dev_kfree_skb_irq(tx_buff->skb);
541 }
542
543 txbd->info = 0;
544 txbd->data = 0;
545 tx_buff->skb = NULL;
546 }
547}
548
549/**
550 * arc_free_rx_queue - free skb from rx queue
551 * @ndev: Pointer to the network device.
552 *
553 * This function must be called while EMAC disable
554 */
555static void arc_free_rx_queue(struct net_device *ndev)
556{
557 struct arc_emac_priv *priv = netdev_priv(ndev);
558 unsigned int i;
559
560 for (i = 0; i < RX_BD_NUM; i++) {
561 struct arc_emac_bd *rxbd = &priv->rxbd[i];
562 struct buffer_state *rx_buff = &priv->rx_buff[i];
563
564 if (rx_buff->skb) {
565 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
566 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
567
568 /* return the sk_buff to system */
569 dev_kfree_skb_irq(rx_buff->skb);
570 }
571
572 rxbd->info = 0;
573 rxbd->data = 0;
574 rx_buff->skb = NULL;
575 }
576}
577
578/**
517 * arc_emac_stop - Close the network device. 579 * arc_emac_stop - Close the network device.
518 * @ndev: Pointer to the network device. 580 * @ndev: Pointer to the network device.
519 * 581 *
@@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev)
534 /* Disable EMAC */ 596 /* Disable EMAC */
535 arc_reg_clr(priv, R_CTRL, EN_MASK); 597 arc_reg_clr(priv, R_CTRL, EN_MASK);
536 598
599 /* Return the sk_buff to system */
600 arc_free_tx_queue(ndev);
601 arc_free_rx_queue(ndev);
602
537 return 0; 603 return 0;
538} 604}
539 605
@@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
610 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 676 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
611 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 677 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
612 678
613 priv->tx_buff[*txbd_curr].skb = skb;
614 priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 679 priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
615 680
616 /* Make sure pointer to data buffer is set */ 681 /* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
620 685
621 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 686 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
622 687
688 /* Make sure info word is set */
689 wmb();
690
691 priv->tx_buff[*txbd_curr].skb = skb;
692
623 /* Increment index to point to the next BD */ 693 /* Increment index to point to the next BD */
624 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 694 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
625 695
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..1fb80100e5e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
6185 shift -= 4; 6185 shift -= 4;
6186 digit = ((num & mask) >> shift); 6186 digit = ((num & mask) >> shift);
6187 if (digit == 0 && remove_leading_zeros) { 6187 if (digit == 0 && remove_leading_zeros) {
6188 mask = mask >> 4; 6188 *str_ptr = '0';
6189 continue; 6189 } else {
6190 } else if (digit < 0xa) 6190 if (digit < 0xa)
6191 *str_ptr = digit + '0'; 6191 *str_ptr = digit + '0';
6192 else 6192 else
6193 *str_ptr = digit - 0xa + 'a'; 6193 *str_ptr = digit - 0xa + 'a';
6194 remove_leading_zeros = 0; 6194
6195 str_ptr++; 6195 remove_leading_zeros = 0;
6196 (*len)--; 6196 str_ptr++;
6197 (*len)--;
6198 }
6197 mask = mask >> 4; 6199 mask = mask >> 4;
6198 if (shift == 4*4) { 6200 if (shift == 4*4) {
6201 if (remove_leading_zeros) {
6202 str_ptr++;
6203 (*len)--;
6204 }
6199 *str_ptr = '.'; 6205 *str_ptr = '.';
6200 str_ptr++; 6206 str_ptr++;
6201 (*len)--; 6207 (*len)--;
6202 remove_leading_zeros = 1; 6208 remove_leading_zeros = 1;
6203 } 6209 }
6204 } 6210 }
6211 if (remove_leading_zeros)
6212 (*len)--;
6205 return 0; 6213 return 0;
6206} 6214}
6207 6215
6216static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
6217{
6218 u8 *str_ptr = str;
6219 u32 mask = 0x00f00000;
6220 u8 shift = 8*3;
6221 u8 digit;
6222 u8 remove_leading_zeros = 1;
6223
6224 if (*len < 10) {
6225 /* Need more than 10chars for this format */
6226 *str_ptr = '\0';
6227 (*len)--;
6228 return -EINVAL;
6229 }
6230
6231 while (shift > 0) {
6232 shift -= 4;
6233 digit = ((num & mask) >> shift);
6234 if (digit == 0 && remove_leading_zeros) {
6235 *str_ptr = '0';
6236 } else {
6237 if (digit < 0xa)
6238 *str_ptr = digit + '0';
6239 else
6240 *str_ptr = digit - 0xa + 'a';
6241
6242 remove_leading_zeros = 0;
6243 str_ptr++;
6244 (*len)--;
6245 }
6246 mask = mask >> 4;
6247 if ((shift == 4*4) || (shift == 4*2)) {
6248 if (remove_leading_zeros) {
6249 str_ptr++;
6250 (*len)--;
6251 }
6252 *str_ptr = '.';
6253 str_ptr++;
6254 (*len)--;
6255 remove_leading_zeros = 1;
6256 }
6257 }
6258 if (remove_leading_zeros)
6259 (*len)--;
6260 return 0;
6261}
6208 6262
6209static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6263static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
6210{ 6264{
@@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9677 9731
9678 if (bnx2x_is_8483x_8485x(phy)) { 9732 if (bnx2x_is_8483x_8485x(phy)) {
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9733 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9734 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9681 phy->ver_addr); 9735 fw_ver1 &= 0xfff;
9736 bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
9682 } else { 9737 } else {
9683 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9738 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9684 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9739 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9732static void bnx2x_848xx_set_led(struct bnx2x *bp, 9787static void bnx2x_848xx_set_led(struct bnx2x *bp,
9733 struct bnx2x_phy *phy) 9788 struct bnx2x_phy *phy)
9734{ 9789{
9735 u16 val, offset, i; 9790 u16 val, led3_blink_rate, offset, i;
9736 static struct bnx2x_reg_set reg_set[] = { 9791 static struct bnx2x_reg_set reg_set[] = {
9737 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9792 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9738 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9793 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9739 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9794 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9740 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9741 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9795 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9742 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9796 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9743 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9797 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9744 }; 9798 };
9799
9800 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
9801 /* Set LED5 source */
9802 bnx2x_cl45_write(bp, phy,
9803 MDIO_PMA_DEVAD,
9804 MDIO_PMA_REG_8481_LED5_MASK,
9805 0x90);
9806 led3_blink_rate = 0x000f;
9807 } else {
9808 led3_blink_rate = 0x0000;
9809 }
9810 /* Set LED3 BLINK */
9811 bnx2x_cl45_write(bp, phy,
9812 MDIO_PMA_DEVAD,
9813 MDIO_PMA_REG_8481_LED3_BLINK,
9814 led3_blink_rate);
9815
9745 /* PHYC_CTL_LED_CTL */ 9816 /* PHYC_CTL_LED_CTL */
9746 bnx2x_cl45_read(bp, phy, 9817 bnx2x_cl45_read(bp, phy,
9747 MDIO_PMA_DEVAD, 9818 MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9749 val &= 0xFE00; 9820 val &= 0xFE00;
9750 val |= 0x0092; 9821 val |= 0x0092;
9751 9822
9823 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9824 val |= 2 << 12; /* LED5 ON based on source */
9825
9752 bnx2x_cl45_write(bp, phy, 9826 bnx2x_cl45_write(bp, phy,
9753 MDIO_PMA_DEVAD, 9827 MDIO_PMA_DEVAD,
9754 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9828 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9762 else 9836 else
9763 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9837 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9764 9838
9765 /* stretch_en for LED3*/ 9839 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9840 val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
9841 MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9842 else
9843 val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9844
9845 /* stretch_en for LEDs */
9766 bnx2x_cl45_read_or_write(bp, phy, 9846 bnx2x_cl45_read_or_write(bp, phy,
9767 MDIO_PMA_DEVAD, offset, 9847 MDIO_PMA_DEVAD,
9768 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9848 offset,
9849 val);
9769} 9850}
9770 9851
9771static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9852static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9775 struct bnx2x *bp = params->bp; 9856 struct bnx2x *bp = params->bp;
9776 switch (action) { 9857 switch (action) {
9777 case PHY_INIT: 9858 case PHY_INIT:
9778 if (!bnx2x_is_8483x_8485x(phy)) { 9859 if (bnx2x_is_8483x_8485x(phy)) {
9779 /* Save spirom version */ 9860 /* Save spirom version */
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9861 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9781 } 9862 }
@@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
10036 10117
10037static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 10118static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10038 struct link_params *params, u16 fw_cmd, 10119 struct link_params *params, u16 fw_cmd,
10039 u16 cmd_args[], int argc) 10120 u16 cmd_args[], int argc, int process)
10040{ 10121{
10041 int idx; 10122 int idx;
10042 u16 val; 10123 u16 val;
10043 struct bnx2x *bp = params->bp; 10124 struct bnx2x *bp = params->bp;
10044 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 10125 int rc = 0;
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10126
10046 MDIO_848xx_CMD_HDLR_STATUS, 10127 if (process == PHY84833_MB_PROCESS2) {
10047 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 10128 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
10129 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10130 MDIO_848xx_CMD_HDLR_STATUS,
10131 PHY84833_STATUS_CMD_OPEN_OVERRIDE);
10132 }
10133
10048 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10134 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10135 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10050 MDIO_848xx_CMD_HDLR_STATUS, &val); 10136 MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10054 } 10140 }
10055 if (idx >= PHY848xx_CMDHDLR_WAIT) { 10141 if (idx >= PHY848xx_CMDHDLR_WAIT) {
10056 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 10142 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
10143 /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
10144 * clear the status to CMD_CLEAR_COMPLETE
10145 */
10146 if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
10147 val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
10148 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10149 MDIO_848xx_CMD_HDLR_STATUS,
10150 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10151 }
10057 return -EINVAL; 10152 return -EINVAL;
10058 } 10153 }
10059 10154 if (process == PHY84833_MB_PROCESS1 ||
10060 /* Prepare argument(s) and issue command */ 10155 process == PHY84833_MB_PROCESS2) {
10061 for (idx = 0; idx < argc; idx++) { 10156 /* Prepare argument(s) */
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10157 for (idx = 0; idx < argc; idx++) {
10063 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10158 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10064 cmd_args[idx]); 10159 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10160 cmd_args[idx]);
10161 }
10065 } 10162 }
10163
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10164 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10067 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); 10165 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
10068 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10166 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10076 if ((idx >= PHY848xx_CMDHDLR_WAIT) || 10174 if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
10077 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 10175 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
10078 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 10176 DP(NETIF_MSG_LINK, "FW cmd failed.\n");
10079 return -EINVAL; 10177 rc = -EINVAL;
10080 } 10178 }
10081 /* Gather returning data */ 10179 if (process == PHY84833_MB_PROCESS3 && rc == 0) {
10082 for (idx = 0; idx < argc; idx++) { 10180 /* Gather returning data */
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10181 for (idx = 0; idx < argc; idx++) {
10084 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10182 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10085 &cmd_args[idx]); 10183 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10184 &cmd_args[idx]);
10185 }
10086 } 10186 }
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10187 if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
10088 MDIO_848xx_CMD_HDLR_STATUS, 10188 val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
10089 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 10189 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10090 return 0; 10190 MDIO_848xx_CMD_HDLR_STATUS,
10191 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10192 }
10193 return rc;
10091} 10194}
10092 10195
10093static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, 10196static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10094 struct link_params *params, 10197 struct link_params *params,
10095 u16 fw_cmd, 10198 u16 fw_cmd,
10096 u16 cmd_args[], int argc) 10199 u16 cmd_args[], int argc,
10200 int process)
10097{ 10201{
10098 struct bnx2x *bp = params->bp; 10202 struct bnx2x *bp = params->bp;
10099 10203
@@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10106 argc); 10210 argc);
10107 } else { 10211 } else {
10108 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, 10212 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
10109 argc); 10213 argc, process);
10110 } 10214 }
10111} 10215}
10112 10216
@@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
10133 10237
10134 status = bnx2x_848xx_cmd_hdlr(phy, params, 10238 status = bnx2x_848xx_cmd_hdlr(phy, params,
10135 PHY848xx_CMD_SET_PAIR_SWAP, data, 10239 PHY848xx_CMD_SET_PAIR_SWAP, data,
10136 PHY848xx_CMDHDLR_MAX_ARGS); 10240 2, PHY84833_MB_PROCESS2);
10137 if (status == 0) 10241 if (status == 0)
10138 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 10242 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
10139 10243
@@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
10222 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10326 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
10223 10327
10224 /* Prevent Phy from working in EEE and advertising it */ 10328 /* Prevent Phy from working in EEE and advertising it */
10225 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10329 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10226 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10330 &cmd_args, 1, PHY84833_MB_PROCESS1);
10227 if (rc) { 10331 if (rc) {
10228 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10332 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
10229 return rc; 10333 return rc;
@@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
10240 struct bnx2x *bp = params->bp; 10344 struct bnx2x *bp = params->bp;
10241 u16 cmd_args = 1; 10345 u16 cmd_args = 1;
10242 10346
10243 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10347 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10244 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10348 &cmd_args, 1, PHY84833_MB_PROCESS1);
10245 if (rc) { 10349 if (rc) {
10246 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10350 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
10247 return rc; 10351 return rc;
@@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10362 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10466 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
10363 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10467 rc = bnx2x_848xx_cmd_hdlr(phy, params,
10364 PHY848xx_CMD_SET_EEE_MODE, cmd_args, 10468 PHY848xx_CMD_SET_EEE_MODE, cmd_args,
10365 PHY848xx_CMDHDLR_MAX_ARGS); 10469 4, PHY84833_MB_PROCESS1);
10366 if (rc) 10470 if (rc)
10367 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10471 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
10368 } 10472 }
@@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10416 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10520 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10417 } 10521 }
10418 10522
10523 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10524 /* Additional settings for jumbo packets in 1000BASE-T mode */
10525 /* Allow rx extended length */
10526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10527 MDIO_AN_REG_8481_AUX_CTRL, &val);
10528 val |= 0x4000;
10529 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10530 MDIO_AN_REG_8481_AUX_CTRL, val);
10531 /* TX FIFO Elasticity LSB */
10532 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10533 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
10534 val |= 0x1;
10535 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10536 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
10537 /* TX FIFO Elasticity MSB */
10538 /* Enable expansion register 0x46 (Pattern Generator status) */
10539 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10540 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
10541
10542 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10543 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
10544 val |= 0x4000;
10545 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10546 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
10547 }
10548
10419 if (bnx2x_is_8483x_8485x(phy)) { 10549 if (bnx2x_is_8483x_8485x(phy)) {
10420 /* Bring PHY out of super isolate mode as the final step. */ 10550 /* Bring PHY out of super isolate mode as the final step. */
10421 bnx2x_cl45_read_and_write(bp, phy, 10551 bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10555 return link_up; 10685 return link_up;
10556} 10686}
10557 10687
10688static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
10689{
10690 int status = 0;
10691 u32 num;
10692
10693 num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
10694 ((raw_ver & 0xF000) >> 12);
10695 status = bnx2x_3_seq_format_ver(num, str, len);
10696 return status;
10697}
10698
10558static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10699static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10559{ 10700{
10560 int status = 0; 10701 int status = 0;
@@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10651 0x0); 10792 0x0);
10652 10793
10653 } else { 10794 } else {
10795 /* LED 1 OFF */
10654 bnx2x_cl45_write(bp, phy, 10796 bnx2x_cl45_write(bp, phy,
10655 MDIO_PMA_DEVAD, 10797 MDIO_PMA_DEVAD,
10656 MDIO_PMA_REG_8481_LED1_MASK, 10798 MDIO_PMA_REG_8481_LED1_MASK,
10657 0x0); 10799 0x0);
10800
10801 if (phy->type ==
10802 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10803 /* LED 2 OFF */
10804 bnx2x_cl45_write(bp, phy,
10805 MDIO_PMA_DEVAD,
10806 MDIO_PMA_REG_8481_LED2_MASK,
10807 0x0);
10808 /* LED 3 OFF */
10809 bnx2x_cl45_write(bp, phy,
10810 MDIO_PMA_DEVAD,
10811 MDIO_PMA_REG_8481_LED3_MASK,
10812 0x0);
10813 }
10658 } 10814 }
10659 break; 10815 break;
10660 case LED_MODE_FRONT_PANEL_OFF: 10816 case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10713 MDIO_PMA_REG_8481_SIGNAL_MASK, 10869 MDIO_PMA_REG_8481_SIGNAL_MASK,
10714 0x0); 10870 0x0);
10715 } 10871 }
10872 if (phy->type ==
10873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10874 /* LED 2 OFF */
10875 bnx2x_cl45_write(bp, phy,
10876 MDIO_PMA_DEVAD,
10877 MDIO_PMA_REG_8481_LED2_MASK,
10878 0x0);
10879 /* LED 3 OFF */
10880 bnx2x_cl45_write(bp, phy,
10881 MDIO_PMA_DEVAD,
10882 MDIO_PMA_REG_8481_LED3_MASK,
10883 0x0);
10884 }
10716 } 10885 }
10717 break; 10886 break;
10718 case LED_MODE_ON: 10887 case LED_MODE_ON:
@@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10776 params->port*4, 10945 params->port*4,
10777 NIG_MASK_MI_INT); 10946 NIG_MASK_MI_INT);
10778 } 10947 }
10948 }
10949 if (phy->type ==
10950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10951 /* Tell LED3 to constant on */
10952 bnx2x_cl45_read(bp, phy,
10953 MDIO_PMA_DEVAD,
10954 MDIO_PMA_REG_8481_LINK_SIGNAL,
10955 &val);
10956 val &= ~(7<<6);
10957 val |= (2<<6); /* A83B[8:6]= 2 */
10958 bnx2x_cl45_write(bp, phy,
10959 MDIO_PMA_DEVAD,
10960 MDIO_PMA_REG_8481_LINK_SIGNAL,
10961 val);
10962 bnx2x_cl45_write(bp, phy,
10963 MDIO_PMA_DEVAD,
10964 MDIO_PMA_REG_8481_LED3_MASK,
10965 0x20);
10966 } else {
10779 bnx2x_cl45_write(bp, phy, 10967 bnx2x_cl45_write(bp, phy,
10780 MDIO_PMA_DEVAD, 10968 MDIO_PMA_DEVAD,
10781 MDIO_PMA_REG_8481_SIGNAL_MASK, 10969 MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10854 MDIO_PMA_REG_8481_LINK_SIGNAL, 11042 MDIO_PMA_REG_8481_LINK_SIGNAL,
10855 val); 11043 val);
10856 if (phy->type == 11044 if (phy->type ==
11045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
11046 bnx2x_cl45_write(bp, phy,
11047 MDIO_PMA_DEVAD,
11048 MDIO_PMA_REG_8481_LED2_MASK,
11049 0x18);
11050 bnx2x_cl45_write(bp, phy,
11051 MDIO_PMA_DEVAD,
11052 MDIO_PMA_REG_8481_LED3_MASK,
11053 0x06);
11054 }
11055 if (phy->type ==
10857 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 11056 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10858 /* Restore LED4 source to external link, 11057 /* Restore LED4 source to external link,
10859 * and re-enable interrupts. 11058 * and re-enable interrupts.
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = {
11982 .read_status = (read_status_t)bnx2x_848xx_read_status, 12181 .read_status = (read_status_t)bnx2x_848xx_read_status,
11983 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 12182 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11984 .config_loopback = (config_loopback_t)NULL, 12183 .config_loopback = (config_loopback_t)NULL,
11985 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 12184 .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
11986 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 12185 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11987 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 12186 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11988 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 12187 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13807 if (CHIP_IS_E3(bp)) { 14006 if (CHIP_IS_E3(bp)) {
13808 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 14007 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13809 bnx2x_set_aer_mmd(params, phy); 14008 bnx2x_set_aer_mmd(params, phy);
13810 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 14009 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
13811 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 14010 (phy->speed_cap_mask &
14011 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
14012 (phy->req_line_speed == SPEED_20000))
13812 bnx2x_check_kr2_wa(params, vars, phy); 14013 bnx2x_check_kr2_wa(params, vars, phy);
13813 bnx2x_check_over_curr(params, vars); 14014 bnx2x_check_over_curr(params, vars);
13814 if (vars->rx_tx_asic_rst) 14015 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49bd5cb..a43dea259b12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/
7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec 7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
7299/* BCM84858 only */
7300#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
7299 7301
7300/* BCM84833 only */ 7302/* BCM84833 only */
7301#define MDIO_84833_TOP_CFG_FW_REV 0x400f 7303#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/
7337#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 7339#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
7338#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 7340#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
7339#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 7341#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
7342/* Mailbox Process */
7343#define PHY84833_MB_PROCESS1 1
7344#define PHY84833_MB_PROCESS2 2
7345#define PHY84833_MB_PROCESS3 3
7340 7346
7341/* Mailbox status set used by 84858 only */ 7347/* Mailbox status set used by 84858 only */
7342#define PHY84858_STATUS_CMD_RECEIVED 0x0001 7348#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5dc89e527e7d..8ab000dd52d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD 69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256 70#define BNXT_RX_COPY_THRESH 256
71 71
72#define BNXT_TX_PUSH_THRESH 92 72#define BNXT_TX_PUSH_THRESH 164
73 73
74enum board_idx { 74enum board_idx {
75 BCM57301, 75 BCM57301,
@@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
223 } 223 }
224 224
225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
226 struct tx_push_bd *push = txr->tx_push; 226 struct tx_push_buffer *tx_push_buf = txr->tx_push;
227 struct tx_bd *tx_push = &push->txbd1; 227 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
228 struct tx_bd_ext *tx_push1 = &push->txbd2; 228 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
229 void *pdata = tx_push1 + 1; 229 void *pdata = tx_push_buf->data;
230 int j; 230 u64 *end;
231 int j, push_len;
231 232
232 /* Set COAL_NOW to be ready quickly for the next push */ 233 /* Set COAL_NOW to be ready quickly for the next push */
233 tx_push->tx_bd_len_flags_type = 234 tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 248 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
248 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 249 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
249 250
251 end = PTR_ALIGN(pdata + length + 1, 8) - 1;
252 *end = 0;
253
250 skb_copy_from_linear_data(skb, pdata, len); 254 skb_copy_from_linear_data(skb, pdata, len);
251 pdata += len; 255 pdata += len;
252 for (j = 0; j < last_frag; j++) { 256 for (j = 0; j < last_frag; j++) {
@@ -261,22 +265,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 pdata += skb_frag_size(frag); 265 pdata += skb_frag_size(frag);
262 } 266 }
263 267
264 memcpy(txbd, tx_push, sizeof(*txbd)); 268 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
269 txbd->tx_bd_haddr = txr->data_mapping;
265 prod = NEXT_TX(prod); 270 prod = NEXT_TX(prod);
266 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 271 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
267 memcpy(txbd, tx_push1, sizeof(*txbd)); 272 memcpy(txbd, tx_push1, sizeof(*txbd));
268 prod = NEXT_TX(prod); 273 prod = NEXT_TX(prod);
269 push->doorbell = 274 tx_push->doorbell =
270 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 275 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
271 txr->tx_prod = prod; 276 txr->tx_prod = prod;
272 277
273 netdev_tx_sent_queue(txq, skb->len); 278 netdev_tx_sent_queue(txq, skb->len);
274 279
275 __iowrite64_copy(txr->tx_doorbell, push, 280 push_len = (length + sizeof(*tx_push) + 7) / 8;
276 (length + sizeof(*push) + 8) / 8); 281 if (push_len > 16) {
282 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
283 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
284 push_len - 16);
285 } else {
286 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
287 push_len);
288 }
277 289
278 tx_buf->is_push = 1; 290 tx_buf->is_push = 1;
279
280 goto tx_done; 291 goto tx_done;
281 } 292 }
282 293
@@ -1753,7 +1764,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1753 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 1764 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1754 bp->tx_push_thresh); 1765 bp->tx_push_thresh);
1755 1766
1756 if (push_size > 128) { 1767 if (push_size > 256) {
1757 push_size = 0; 1768 push_size = 0;
1758 bp->tx_push_thresh = 0; 1769 bp->tx_push_thresh = 0;
1759 } 1770 }
@@ -1772,7 +1783,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1772 return rc; 1783 return rc;
1773 1784
1774 if (bp->tx_push_size) { 1785 if (bp->tx_push_size) {
1775 struct tx_bd *txbd;
1776 dma_addr_t mapping; 1786 dma_addr_t mapping;
1777 1787
1778 /* One pre-allocated DMA buffer to backup 1788 /* One pre-allocated DMA buffer to backup
@@ -1786,13 +1796,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1786 if (!txr->tx_push) 1796 if (!txr->tx_push)
1787 return -ENOMEM; 1797 return -ENOMEM;
1788 1798
1789 txbd = &txr->tx_push->txbd1;
1790
1791 mapping = txr->tx_push_mapping + 1799 mapping = txr->tx_push_mapping +
1792 sizeof(struct tx_push_bd); 1800 sizeof(struct tx_push_bd);
1793 txbd->tx_bd_haddr = cpu_to_le64(mapping); 1801 txr->data_mapping = cpu_to_le64(mapping);
1794 1802
1795 memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); 1803 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1796 } 1804 }
1797 ring->queue_id = bp->q_info[j].queue_id; 1805 ring->queue_id = bp->q_info[j].queue_id;
1798 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 1806 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -4546,20 +4554,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4546 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4554 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4547 link_info->force_pause_setting != link_info->req_flow_ctrl) 4555 link_info->force_pause_setting != link_info->req_flow_ctrl)
4548 update_pause = true; 4556 update_pause = true;
4549 if (link_info->req_duplex != link_info->duplex_setting)
4550 update_link = true;
4551 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4557 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4552 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4558 if (BNXT_AUTO_MODE(link_info->auto_mode))
4553 update_link = true; 4559 update_link = true;
4554 if (link_info->req_link_speed != link_info->force_link_speed) 4560 if (link_info->req_link_speed != link_info->force_link_speed)
4555 update_link = true; 4561 update_link = true;
4562 if (link_info->req_duplex != link_info->duplex_setting)
4563 update_link = true;
4556 } else { 4564 } else {
4557 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4565 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4558 update_link = true; 4566 update_link = true;
4559 if (link_info->advertising != link_info->auto_link_speeds) 4567 if (link_info->advertising != link_info->auto_link_speeds)
4560 update_link = true; 4568 update_link = true;
4561 if (link_info->req_link_speed != link_info->auto_link_speed)
4562 update_link = true;
4563 } 4569 }
4564 4570
4565 if (update_link) 4571 if (update_link)
@@ -4636,7 +4642,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4636 if (link_re_init) { 4642 if (link_re_init) {
4637 rc = bnxt_update_phy_setting(bp); 4643 rc = bnxt_update_phy_setting(bp);
4638 if (rc) 4644 if (rc)
4639 goto open_err; 4645 netdev_warn(bp->dev, "failed to update phy settings\n");
4640 } 4646 }
4641 4647
4642 if (irq_re_init) { 4648 if (irq_re_init) {
@@ -4654,6 +4660,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4654 /* Enable TX queues */ 4660 /* Enable TX queues */
4655 bnxt_tx_enable(bp); 4661 bnxt_tx_enable(bp);
4656 mod_timer(&bp->timer, jiffies + bp->current_interval); 4662 mod_timer(&bp->timer, jiffies + bp->current_interval);
4663 bnxt_update_link(bp, true);
4657 4664
4658 return 0; 4665 return 0;
4659 4666
@@ -5670,22 +5677,16 @@ static int bnxt_probe_phy(struct bnxt *bp)
5670 } 5677 }
5671 5678
5672 /*initialize the ethool setting copy with NVM settings */ 5679 /*initialize the ethool setting copy with NVM settings */
5673 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5680 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5674 link_info->autoneg |= BNXT_AUTONEG_SPEED; 5681 link_info->autoneg = BNXT_AUTONEG_SPEED |
5675 5682 BNXT_AUTONEG_FLOW_CTRL;
5676 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5683 link_info->advertising = link_info->auto_link_speeds;
5677 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5678 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5679 link_info->req_flow_ctrl = link_info->auto_pause_setting; 5684 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5680 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5685 } else {
5686 link_info->req_link_speed = link_info->force_link_speed;
5687 link_info->req_duplex = link_info->duplex_setting;
5681 link_info->req_flow_ctrl = link_info->force_pause_setting; 5688 link_info->req_flow_ctrl = link_info->force_pause_setting;
5682 } 5689 }
5683 link_info->req_duplex = link_info->duplex_setting;
5684 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5685 link_info->req_link_speed = link_info->auto_link_speed;
5686 else
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->advertising = link_info->auto_link_speeds;
5689 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", 5690 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5690 link_info->phy_ver[0], 5691 link_info->phy_ver[0],
5691 link_info->phy_ver[1], 5692 link_info->phy_ver[1],
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8efcef..2be51b332652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext {
411 411
412#define BNXT_NUM_TESTS(bp) 0 412#define BNXT_NUM_TESTS(bp) 0
413 413
414#define BNXT_DEFAULT_RX_RING_SIZE 1023 414#define BNXT_DEFAULT_RX_RING_SIZE 511
415#define BNXT_DEFAULT_TX_RING_SIZE 512 415#define BNXT_DEFAULT_TX_RING_SIZE 511
416 416
417#define MAX_TPA 64 417#define MAX_TPA 64
418 418
@@ -523,10 +523,16 @@ struct bnxt_ring_struct {
523 523
524struct tx_push_bd { 524struct tx_push_bd {
525 __le32 doorbell; 525 __le32 doorbell;
526 struct tx_bd txbd1; 526 __le32 tx_bd_len_flags_type;
527 u32 tx_bd_opaque;
527 struct tx_bd_ext txbd2; 528 struct tx_bd_ext txbd2;
528}; 529};
529 530
531struct tx_push_buffer {
532 struct tx_push_bd push_bd;
533 u32 data[25];
534};
535
530struct bnxt_tx_ring_info { 536struct bnxt_tx_ring_info {
531 struct bnxt_napi *bnapi; 537 struct bnxt_napi *bnapi;
532 u16 tx_prod; 538 u16 tx_prod;
@@ -538,8 +544,9 @@ struct bnxt_tx_ring_info {
538 544
539 dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; 545 dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
540 546
541 struct tx_push_bd *tx_push; 547 struct tx_push_buffer *tx_push;
542 dma_addr_t tx_push_mapping; 548 dma_addr_t tx_push_mapping;
549 __le64 data_mapping;
543 550
544#define BNXT_DEV_STATE_CLOSING 0x1 551#define BNXT_DEV_STATE_CLOSING 0x1
545 u32 dev_state; 552 u32 dev_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898e7a32..3238817dfd5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
486 speed_mask |= SUPPORTED_2500baseX_Full; 486 speed_mask |= SUPPORTED_2500baseX_Full;
487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
488 speed_mask |= SUPPORTED_10000baseT_Full; 488 speed_mask |= SUPPORTED_10000baseT_Full;
489 /* TODO: support 25GB, 50GB with different cable type */
490 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
491 speed_mask |= SUPPORTED_20000baseMLD2_Full |
492 SUPPORTED_20000baseKR2_Full;
493 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 489 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
494 speed_mask |= SUPPORTED_40000baseKR4_Full | 490 speed_mask |= SUPPORTED_40000baseCR4_Full;
495 SUPPORTED_40000baseCR4_Full |
496 SUPPORTED_40000baseSR4_Full |
497 SUPPORTED_40000baseLR4_Full;
498 491
499 return speed_mask; 492 return speed_mask;
500} 493}
@@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
514 speed_mask |= ADVERTISED_2500baseX_Full; 507 speed_mask |= ADVERTISED_2500baseX_Full;
515 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 508 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
516 speed_mask |= ADVERTISED_10000baseT_Full; 509 speed_mask |= ADVERTISED_10000baseT_Full;
517 /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
518 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
519 speed_mask |= ADVERTISED_20000baseMLD2_Full |
520 ADVERTISED_20000baseKR2_Full;
521 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 510 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
522 speed_mask |= ADVERTISED_40000baseKR4_Full | 511 speed_mask |= ADVERTISED_40000baseCR4_Full;
523 ADVERTISED_40000baseCR4_Full |
524 ADVERTISED_40000baseSR4_Full |
525 ADVERTISED_40000baseLR4_Full;
526 return speed_mask; 512 return speed_mask;
527} 513}
528 514
@@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
557 u16 ethtool_speed; 543 u16 ethtool_speed;
558 544
559 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 545 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
546 cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
560 547
561 if (link_info->auto_link_speeds) 548 if (link_info->auto_link_speeds)
562 cmd->supported |= SUPPORTED_Autoneg; 549 cmd->supported |= SUPPORTED_Autoneg;
563 550
564 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 551 if (link_info->autoneg) {
565 cmd->advertising = 552 cmd->advertising =
566 bnxt_fw_to_ethtool_advertised_spds(link_info); 553 bnxt_fw_to_ethtool_advertised_spds(link_info);
567 cmd->advertising |= ADVERTISED_Autoneg; 554 cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570 cmd->autoneg = AUTONEG_DISABLE; 557 cmd->autoneg = AUTONEG_DISABLE;
571 cmd->advertising = 0; 558 cmd->advertising = 0;
572 } 559 }
573 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 560 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
574 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 561 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
575 BNXT_LINK_PAUSE_BOTH) { 562 BNXT_LINK_PAUSE_BOTH) {
576 cmd->advertising |= ADVERTISED_Pause; 563 cmd->advertising |= ADVERTISED_Pause;
577 cmd->supported |= SUPPORTED_Pause;
578 } else { 564 } else {
579 cmd->advertising |= ADVERTISED_Asym_Pause; 565 cmd->advertising |= ADVERTISED_Asym_Pause;
580 cmd->supported |= SUPPORTED_Asym_Pause;
581 if (link_info->auto_pause_setting & 566 if (link_info->auto_pause_setting &
582 BNXT_LINK_PAUSE_RX) 567 BNXT_LINK_PAUSE_RX)
583 cmd->advertising |= ADVERTISED_Pause; 568 cmd->advertising |= ADVERTISED_Pause;
584 } 569 }
585 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
586 if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
587 BNXT_LINK_PAUSE_BOTH) {
588 cmd->supported |= SUPPORTED_Pause;
589 } else {
590 cmd->supported |= SUPPORTED_Asym_Pause;
591 if (link_info->force_pause_setting &
592 BNXT_LINK_PAUSE_RX)
593 cmd->supported |= SUPPORTED_Pause;
594 }
595 } 570 }
596 571
597 cmd->port = PORT_NONE; 572 cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
670 if (advertising & ADVERTISED_10000baseT_Full) 645 if (advertising & ADVERTISED_10000baseT_Full)
671 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 646 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
672 647
648 if (advertising & ADVERTISED_40000baseCR4_Full)
649 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
650
673 return fw_speed_mask; 651 return fw_speed_mask;
674} 652}
675 653
@@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
729 speed = ethtool_cmd_speed(cmd); 707 speed = ethtool_cmd_speed(cmd);
730 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 708 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
731 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 709 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
732 link_info->autoneg &= ~BNXT_AUTONEG_SPEED; 710 link_info->autoneg = 0;
733 link_info->advertising = 0; 711 link_info->advertising = 0;
734 } 712 }
735 713
@@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev,
748 726
749 if (BNXT_VF(bp)) 727 if (BNXT_VF(bp))
750 return; 728 return;
751 epause->autoneg = !!(link_info->auto_pause_setting & 729 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
752 BNXT_LINK_PAUSE_BOTH);
753 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); 730 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
754 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); 731 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
755} 732}
@@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
765 return rc; 742 return rc;
766 743
767 if (epause->autoneg) { 744 if (epause->autoneg) {
745 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
746 return -EINVAL;
747
768 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 748 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
769 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 749 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
770 } else { 750 } else {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d787c7..d7e01a74e927 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2445 } 2445 }
2446 2446
2447 /* Link UP/DOWN event */ 2447 /* Link UP/DOWN event */
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2448 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev, 2449 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2450 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2451 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 872765527081..34d269cd5579 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1683,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1684 /* droq creation and local register settings. */ 1684 /* droq creation and local register settings. */
1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1686 if (ret_val == -1) 1686 if (ret_val < 0)
1687 return ret_val; 1687 return ret_val;
1688 1688
1689 if (ret_val == 1) { 1689 if (ret_val == 1) {
@@ -2524,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct,
2524 2524
2525 octeon_swap_8B_data(&resp->timestamp, 1); 2525 octeon_swap_8B_data(&resp->timestamp, 1);
2526 2526
2527 if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { 2527 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2528 struct skb_shared_hwtstamps ts; 2528 struct skb_shared_hwtstamps ts;
2529 u64 ns = resp->timestamp; 2529 u64 ns = resp->timestamp;
2530 2530
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86eaa045..174072b3740b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct,
983 983
984create_droq_fail: 984create_droq_fail:
985 octeon_delete_droq(oct, q_no); 985 octeon_delete_droq(oct, q_no);
986 return -1; 986 return -ENOMEM;
987} 987}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a86a42..a009bc30dc4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
574 574
575static void nicvf_rcv_pkt_handler(struct net_device *netdev, 575static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576 struct napi_struct *napi, 576 struct napi_struct *napi,
577 struct cmp_queue *cq, 577 struct cqe_rx_t *cqe_rx)
578 struct cqe_rx_t *cqe_rx, int cqe_type)
579{ 578{
580 struct sk_buff *skb; 579 struct sk_buff *skb;
581 struct nicvf *nic = netdev_priv(netdev); 580 struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
591 } 590 }
592 591
593 /* Check for errors */ 592 /* Check for errors */
594 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 593 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
595 if (err && !cqe_rx->rb_cnt) 594 if (err && !cqe_rx->rb_cnt)
596 return; 595 return;
597 596
@@ -682,8 +681,7 @@ loop:
682 cq_idx, cq_desc->cqe_type); 681 cq_idx, cq_desc->cqe_type);
683 switch (cq_desc->cqe_type) { 682 switch (cq_desc->cqe_type) {
684 case CQE_TYPE_RX: 683 case CQE_TYPE_RX:
685 nicvf_rcv_pkt_handler(netdev, napi, cq, 684 nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
686 cq_desc, CQE_TYPE_RX);
687 work_done++; 685 work_done++;
688 break; 686 break;
689 case CQE_TYPE_SEND: 687 case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev)
1125 1123
1126 /* Clear multiqset info */ 1124 /* Clear multiqset info */
1127 nic->pnicvf = nic; 1125 nic->pnicvf = nic;
1128 nic->sqs_count = 0;
1129 1126
1130 return 0; 1127 return 0;
1131} 1128}
@@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic)
1354 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1351 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1355 stats->tx_bcast_frames_ok + 1352 stats->tx_bcast_frames_ok +
1356 stats->tx_mcast_frames_ok; 1353 stats->tx_mcast_frames_ok;
1354 drv_stats->rx_frames_ok = stats->rx_ucast_frames +
1355 stats->rx_bcast_frames +
1356 stats->rx_mcast_frames;
1357 drv_stats->rx_drops = stats->rx_drop_red + 1357 drv_stats->rx_drops = stats->rx_drop_red +
1358 stats->rx_drop_overrun; 1358 stats->rx_drop_overrun;
1359 drv_stats->tx_drops = stats->tx_drops; 1359 drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538 1538
1539 nicvf_send_vf_struct(nic); 1539 nicvf_send_vf_struct(nic);
1540 1540
1541 if (!pass1_silicon(nic->pdev))
1542 nic->hw_tso = true;
1543
1541 /* Check if this VF is in QS only mode */ 1544 /* Check if this VF is in QS only mode */
1542 if (nic->sqs_mode) 1545 if (nic->sqs_mode)
1543 return 0; 1546 return 0;
@@ -1557,9 +1560,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1557 1560
1558 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 1561 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1559 1562
1560 if (!pass1_silicon(nic->pdev))
1561 nic->hw_tso = true;
1562
1563 netdev->netdev_ops = &nicvf_netdev_ops; 1563 netdev->netdev_ops = &nicvf_netdev_ops;
1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT; 1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1565 1565
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b5490061..767347b1f631 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1329} 1329}
1330 1330
1331/* Check for errors in the receive cmp.queue entry */ 1331/* Check for errors in the receive cmp.queue entry */
1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1333 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1334{ 1333{
1335 struct nicvf_hw_stats *stats = &nic->hw_stats; 1334 struct nicvf_hw_stats *stats = &nic->hw_stats;
1336 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1337 1335
1338 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1336 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1339 drv_stats->rx_frames_ok++;
1340 return 0; 1337 return 0;
1341 }
1342 1338
1343 if (netif_msg_rx_err(nic)) 1339 if (netif_msg_rx_err(nic))
1344 netdev_err(nic->netdev, 1340 netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7f213a..6673e1133523 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
338/* Stats */ 338/* Stats */
339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
341int nicvf_check_cqe_rx_errs(struct nicvf *nic, 341int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
342 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
343int nicvf_check_cqe_tx_errs(struct nicvf *nic, 342int nicvf_check_cqe_tx_errs(struct nicvf *nic,
344 struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 343 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
345#endif /* NICVF_QUEUES_H */ 344#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa6c4d8..a89721fad633 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682} 682}
683 683
684static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
685{
686 char tok[len + 1];
687
688 memcpy(tok, s, len);
689 tok[len] = 0;
690 return kstrtouint(strim(tok), base, val);
691}
692
693static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
694{
695 char tok[len + 1];
696
697 memcpy(tok, s, len);
698 tok[len] = 0;
699 return kstrtou16(strim(tok), base, val);
700}
701
684/** 702/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM 703 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read 704 * @adapter: adapter to read
@@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
709 return ret; 727 return ret;
710 } 728 }
711 729
712 ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); 730 ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
713 if (ret) 731 if (ret)
714 return ret; 732 return ret;
715 ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); 733 ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
716 if (ret) 734 if (ret)
717 return ret; 735 return ret;
718 ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); 736 ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
719 if (ret) 737 if (ret)
720 return ret; 738 return ret;
721 ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); 739 ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
722 if (ret) 740 if (ret)
723 return ret; 741 return ret;
724 ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); 742 ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
725 if (ret) 743 if (ret)
726 return ret; 744 return ret;
727 memcpy(p->sn, vpd.sn_data, SERNUM_LEN); 745 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
733 } else { 751 } else {
734 p->port_type[0] = hex_to_bin(vpd.port0_data[0]); 752 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 p->port_type[1] = hex_to_bin(vpd.port1_data[0]); 753 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); 754 ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
755 &p->xauicfg[0]);
737 if (ret) 756 if (ret)
738 return ret; 757 return ret;
739 ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); 758 ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
759 &p->xauicfg[1]);
740 if (ret) 760 if (ret)
741 return ret; 761 return ret;
742 } 762 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda635456d..06bc2d2e7a73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ 165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
168 169
169 /* T6 adapters: 170 /* T6 adapters:
170 */ 171 */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72dbacd..48d91941408d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@ struct board_info {
128 struct resource *data_res; 128 struct resource *data_res;
129 struct resource *addr_req; /* resources requested */ 129 struct resource *addr_req; /* resources requested */
130 struct resource *data_req; 130 struct resource *data_req;
131 struct resource *irq_res;
132 131
133 int irq_wake; 132 int irq_wake;
134 133
@@ -1300,22 +1299,16 @@ static int
1300dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1301{ 1300{
1302 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1303 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 1302
1305 if (netif_msg_ifup(db)) 1303 if (netif_msg_ifup(db))
1306 dev_dbg(db->dev, "enabling %s\n", dev->name); 1304 dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 1305
1308 /* If there is no IRQ type specified, default to something that 1306 /* If there is no IRQ type specified, tell the user that this is a
1309 * may work, and tell the user that this is a problem */ 1307 * problem
1310 1308 */
1311 if (irqflags == IRQF_TRIGGER_NONE) 1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
1312 irqflags = irq_get_trigger_type(dev->irq);
1313
1314 if (irqflags == IRQF_TRIGGER_NONE)
1315 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 1311
1317 irqflags |= IRQF_SHARED;
1318
1319 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1321 mdelay(1); /* delay needs by DM9000B */ 1314 mdelay(1); /* delay needs by DM9000B */
@@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev)
1323 /* Initialize DM9000 board */ 1316 /* Initialize DM9000 board */
1324 dm9000_init_dm9000(dev); 1317 dm9000_init_dm9000(dev);
1325 1318
1326 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
1320 dev->name, dev))
1327 return -EAGAIN; 1321 return -EAGAIN;
1328 /* Now that we have an interrupt handler hooked up we can unmask 1322 /* Now that we have an interrupt handler hooked up we can unmask
1329 * our interrupts 1323 * our interrupts
@@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev)
1500 1494
1501 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1495 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1496 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 1497
1505 if (db->addr_res == NULL || db->data_res == NULL || 1498 if (!db->addr_res || !db->data_res) {
1506 db->irq_res == NULL) { 1499 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1507 dev_err(db->dev, "insufficient resources\n"); 1500 db->addr_res, db->data_res);
1508 ret = -ENOENT; 1501 ret = -ENOENT;
1509 goto out; 1502 goto out;
1510 } 1503 }
1511 1504
1505 ndev->irq = platform_get_irq(pdev, 0);
1506 if (ndev->irq < 0) {
1507 dev_err(db->dev, "interrupt resource unavailable: %d\n",
1508 ndev->irq);
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1512 db->irq_wake = platform_get_irq(pdev, 1); 1513 db->irq_wake = platform_get_irq(pdev, 1);
1513 if (db->irq_wake >= 0) { 1514 if (db->irq_wake >= 0) {
1514 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev)
1570 1571
1571 /* fill in parameters for net-dev structure */ 1572 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr; 1573 ndev->base_addr = (unsigned long)db->io_addr;
1573 ndev->irq = db->irq_res->start;
1574 1574
1575 /* ensure at least we have a default set of IO routines */ 1575 /* ensure at least we have a default set of IO routines */
1576 dm9000_set_io(db, iosize); 1576 dm9000_set_io(db, iosize);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f588ad2..678f5018d0be 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 goto failed; 469 goto failed;
470 } 470 }
471 /* Read MACID from CIS */ 471 /* Read MACID from CIS */
472 for (i = 5; i < 11; i++) 472 for (i = 0; i < 6; i++)
473 dev->dev_addr[i] = buf[i]; 473 dev->dev_addr[i] = buf[i + 5];
474 kfree(buf); 474 kfree(buf);
475 } else { 475 } else {
476 if (pcmcia_get_mac_from_cis(link, dev)) 476 if (pcmcia_get_mac_from_cis(link, dev))
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 662c2ee268c7..b0ae69f84493 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,11 @@ struct mvneta_port {
370 struct net_device *dev; 370 struct net_device *dev;
371 struct notifier_block cpu_notifier; 371 struct notifier_block cpu_notifier;
372 int rxq_def; 372 int rxq_def;
373 /* Protect the access to the percpu interrupt registers,
374 * ensuring that the configuration remains coherent.
375 */
376 spinlock_t lock;
377 bool is_stopped;
373 378
374 /* Core clock */ 379 /* Core clock */
375 struct clk *clk; 380 struct clk *clk;
@@ -1038,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1038 } 1043 }
1039} 1044}
1040 1045
1046static void mvneta_percpu_unmask_interrupt(void *arg)
1047{
1048 struct mvneta_port *pp = arg;
1049
1050 /* All the queue are unmasked, but actually only the ones
1051 * mapped to this CPU will be unmasked
1052 */
1053 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1054 MVNETA_RX_INTR_MASK_ALL |
1055 MVNETA_TX_INTR_MASK_ALL |
1056 MVNETA_MISCINTR_INTR_MASK);
1057}
1058
1059static void mvneta_percpu_mask_interrupt(void *arg)
1060{
1061 struct mvneta_port *pp = arg;
1062
1063 /* All the queue are masked, but actually only the ones
1064 * mapped to this CPU will be masked
1065 */
1066 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1067 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1068 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1069}
1070
1071static void mvneta_percpu_clear_intr_cause(void *arg)
1072{
1073 struct mvneta_port *pp = arg;
1074
1075 /* All the queue are cleared, but actually only the ones
1076 * mapped to this CPU will be cleared
1077 */
1078 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1079 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1080 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1081}
1082
1041/* This method sets defaults to the NETA port: 1083/* This method sets defaults to the NETA port:
1042 * Clears interrupt Cause and Mask registers. 1084 * Clears interrupt Cause and Mask registers.
1043 * Clears all MAC tables. 1085 * Clears all MAC tables.
@@ -1055,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
1055 int max_cpu = num_present_cpus(); 1097 int max_cpu = num_present_cpus();
1056 1098
1057 /* Clear all Cause registers */ 1099 /* Clear all Cause registers */
1058 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1100 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1059 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1060 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1061 1101
1062 /* Mask all interrupts */ 1102 /* Mask all interrupts */
1063 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1103 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1064 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1065 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1066 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1104 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1067 1105
1068 /* Enable MBUS Retry bit16 */ 1106 /* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
2528 return 0; 2566 return 0;
2529} 2567}
2530 2568
2531static void mvneta_percpu_unmask_interrupt(void *arg)
2532{
2533 struct mvneta_port *pp = arg;
2534
2535 /* All the queue are unmasked, but actually only the ones
2536 * maped to this CPU will be unmasked
2537 */
2538 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2539 MVNETA_RX_INTR_MASK_ALL |
2540 MVNETA_TX_INTR_MASK_ALL |
2541 MVNETA_MISCINTR_INTR_MASK);
2542}
2543
2544static void mvneta_percpu_mask_interrupt(void *arg)
2545{
2546 struct mvneta_port *pp = arg;
2547
2548 /* All the queue are masked, but actually only the ones
2549 * maped to this CPU will be masked
2550 */
2551 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2552 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2553 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2554}
2555
2556static void mvneta_start_dev(struct mvneta_port *pp) 2569static void mvneta_start_dev(struct mvneta_port *pp)
2557{ 2570{
2558 unsigned int cpu; 2571 int cpu;
2559 2572
2560 mvneta_max_rx_size_set(pp, pp->pkt_size); 2573 mvneta_max_rx_size_set(pp, pp->pkt_size);
2561 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2574 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2564 mvneta_port_enable(pp); 2577 mvneta_port_enable(pp);
2565 2578
2566 /* Enable polling on the port */ 2579 /* Enable polling on the port */
2567 for_each_present_cpu(cpu) { 2580 for_each_online_cpu(cpu) {
2568 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2581 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2569 2582
2570 napi_enable(&port->napi); 2583 napi_enable(&port->napi);
2571 } 2584 }
2572 2585
2573 /* Unmask interrupts. It has to be done from each CPU */ 2586 /* Unmask interrupts. It has to be done from each CPU */
2574 for_each_online_cpu(cpu) 2587 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2575 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2588
2576 pp, true);
2577 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2589 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2578 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2590 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2579 MVNETA_CAUSE_LINK_CHANGE | 2591 MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2589 2601
2590 phy_stop(pp->phy_dev); 2602 phy_stop(pp->phy_dev);
2591 2603
2592 for_each_present_cpu(cpu) { 2604 for_each_online_cpu(cpu) {
2593 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2605 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2594 2606
2595 napi_disable(&port->napi); 2607 napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2604 mvneta_port_disable(pp); 2616 mvneta_port_disable(pp);
2605 2617
2606 /* Clear all ethernet port interrupts */ 2618 /* Clear all ethernet port interrupts */
2607 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2619 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
2608 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2609 2620
2610 /* Mask all ethernet port interrupts */ 2621 /* Mask all ethernet port interrupts */
2611 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2622 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2612 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2613 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2614 2623
2615 mvneta_tx_reset(pp); 2624 mvneta_tx_reset(pp);
2616 mvneta_rx_reset(pp); 2625 mvneta_rx_reset(pp);
@@ -2847,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
2847 disable_percpu_irq(pp->dev->irq); 2856 disable_percpu_irq(pp->dev->irq);
2848} 2857}
2849 2858
2859/* Electing a CPU must be done in an atomic way: it should be done
2860 * after or before the removal/insertion of a CPU and this function is
2861 * not reentrant.
2862 */
2850static void mvneta_percpu_elect(struct mvneta_port *pp) 2863static void mvneta_percpu_elect(struct mvneta_port *pp)
2851{ 2864{
2852 int online_cpu_idx, max_cpu, cpu, i = 0; 2865 int elected_cpu = 0, max_cpu, cpu, i = 0;
2866
2867 /* Use the cpu associated to the rxq when it is online, in all
2868 * the other cases, use the cpu 0 which can't be offline.
2869 */
2870 if (cpu_online(pp->rxq_def))
2871 elected_cpu = pp->rxq_def;
2853 2872
2854 online_cpu_idx = pp->rxq_def % num_online_cpus();
2855 max_cpu = num_present_cpus(); 2873 max_cpu = num_present_cpus();
2856 2874
2857 for_each_online_cpu(cpu) { 2875 for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2862 if ((rxq % max_cpu) == cpu) 2880 if ((rxq % max_cpu) == cpu)
2863 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 2881 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2864 2882
2865 if (i == online_cpu_idx) 2883 if (cpu == elected_cpu)
2866 /* Map the default receive queue queue to the 2884 /* Map the default receive queue queue to the
2867 * elected CPU 2885 * elected CPU
2868 */ 2886 */
@@ -2873,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2873 * the CPU bound to the default RX queue 2891 * the CPU bound to the default RX queue
2874 */ 2892 */
2875 if (txq_number == 1) 2893 if (txq_number == 1)
2876 txq_map = (i == online_cpu_idx) ? 2894 txq_map = (cpu == elected_cpu) ?
2877 MVNETA_CPU_TXQ_ACCESS(1) : 0; 2895 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2878 else 2896 else
2879 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 2897 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2902,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2902 switch (action) { 2920 switch (action) {
2903 case CPU_ONLINE: 2921 case CPU_ONLINE:
2904 case CPU_ONLINE_FROZEN: 2922 case CPU_ONLINE_FROZEN:
2923 spin_lock(&pp->lock);
2924 /* Configuring the driver for a new CPU while the
2925 * driver is stopping is racy, so just avoid it.
2926 */
2927 if (pp->is_stopped) {
2928 spin_unlock(&pp->lock);
2929 break;
2930 }
2905 netif_tx_stop_all_queues(pp->dev); 2931 netif_tx_stop_all_queues(pp->dev);
2906 2932
2907 /* We have to synchronise on tha napi of each CPU 2933 /* We have to synchronise on tha napi of each CPU
@@ -2917,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2917 } 2943 }
2918 2944
2919 /* Mask all ethernet port interrupts */ 2945 /* Mask all ethernet port interrupts */
2920 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2946 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2921 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2922 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2923 napi_enable(&port->napi); 2947 napi_enable(&port->napi);
2924 2948
2925 2949
@@ -2934,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2934 */ 2958 */
2935 mvneta_percpu_elect(pp); 2959 mvneta_percpu_elect(pp);
2936 2960
2937 /* Unmask all ethernet port interrupts, as this 2961 /* Unmask all ethernet port interrupts */
2938 * notifier is called for each CPU then the CPU to 2962 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2939 * Queue mapping is applied
2940 */
2941 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2942 MVNETA_RX_INTR_MASK(rxq_number) |
2943 MVNETA_TX_INTR_MASK(txq_number) |
2944 MVNETA_MISCINTR_INTR_MASK);
2945 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2963 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2946 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2964 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2947 MVNETA_CAUSE_LINK_CHANGE | 2965 MVNETA_CAUSE_LINK_CHANGE |
2948 MVNETA_CAUSE_PSC_SYNC_CHANGE); 2966 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2949 netif_tx_start_all_queues(pp->dev); 2967 netif_tx_start_all_queues(pp->dev);
2968 spin_unlock(&pp->lock);
2950 break; 2969 break;
2951 case CPU_DOWN_PREPARE: 2970 case CPU_DOWN_PREPARE:
2952 case CPU_DOWN_PREPARE_FROZEN: 2971 case CPU_DOWN_PREPARE_FROZEN:
2953 netif_tx_stop_all_queues(pp->dev); 2972 netif_tx_stop_all_queues(pp->dev);
2973 /* Thanks to this lock we are sure that any pending
2974 * cpu election is done
2975 */
2976 spin_lock(&pp->lock);
2954 /* Mask all ethernet port interrupts */ 2977 /* Mask all ethernet port interrupts */
2955 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2978 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2956 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2979 spin_unlock(&pp->lock);
2957 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2958 2980
2959 napi_synchronize(&port->napi); 2981 napi_synchronize(&port->napi);
2960 napi_disable(&port->napi); 2982 napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2968 case CPU_DEAD: 2990 case CPU_DEAD:
2969 case CPU_DEAD_FROZEN: 2991 case CPU_DEAD_FROZEN:
2970 /* Check if a new CPU must be elected now this on is down */ 2992 /* Check if a new CPU must be elected now this on is down */
2993 spin_lock(&pp->lock);
2971 mvneta_percpu_elect(pp); 2994 mvneta_percpu_elect(pp);
2995 spin_unlock(&pp->lock);
2972 /* Unmask all ethernet port interrupts */ 2996 /* Unmask all ethernet port interrupts */
2973 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2997 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2974 MVNETA_RX_INTR_MASK(rxq_number) |
2975 MVNETA_TX_INTR_MASK(txq_number) |
2976 MVNETA_MISCINTR_INTR_MASK);
2977 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2998 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2978 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2999 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2979 MVNETA_CAUSE_LINK_CHANGE | 3000 MVNETA_CAUSE_LINK_CHANGE |
@@ -2988,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2988static int mvneta_open(struct net_device *dev) 3009static int mvneta_open(struct net_device *dev)
2989{ 3010{
2990 struct mvneta_port *pp = netdev_priv(dev); 3011 struct mvneta_port *pp = netdev_priv(dev);
2991 int ret, cpu; 3012 int ret;
2992 3013
2993 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3014 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2994 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 3015 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
3010 goto err_cleanup_txqs; 3031 goto err_cleanup_txqs;
3011 } 3032 }
3012 3033
3013 /* Even though the documentation says that request_percpu_irq
3014 * doesn't enable the interrupts automatically, it actually
3015 * does so on the local CPU.
3016 *
3017 * Make sure it's disabled.
3018 */
3019 mvneta_percpu_disable(pp);
3020
3021 /* Enable per-CPU interrupt on all the CPU to handle our RX 3034 /* Enable per-CPU interrupt on all the CPU to handle our RX
3022 * queue interrupts 3035 * queue interrupts
3023 */ 3036 */
3024 for_each_online_cpu(cpu) 3037 on_each_cpu(mvneta_percpu_enable, pp, true);
3025 smp_call_function_single(cpu, mvneta_percpu_enable,
3026 pp, true);
3027
3028 3038
3039 pp->is_stopped = false;
3029 /* Register a CPU notifier to handle the case where our CPU 3040 /* Register a CPU notifier to handle the case where our CPU
3030 * might be taken offline. 3041 * might be taken offline.
3031 */ 3042 */
@@ -3057,13 +3068,20 @@ err_cleanup_rxqs:
3057static int mvneta_stop(struct net_device *dev) 3068static int mvneta_stop(struct net_device *dev)
3058{ 3069{
3059 struct mvneta_port *pp = netdev_priv(dev); 3070 struct mvneta_port *pp = netdev_priv(dev);
3060 int cpu;
3061 3071
3072 /* Inform that we are stopping so we don't want to setup the
3073 * driver for new CPUs in the notifiers
3074 */
3075 spin_lock(&pp->lock);
3076 pp->is_stopped = true;
3062 mvneta_stop_dev(pp); 3077 mvneta_stop_dev(pp);
3063 mvneta_mdio_remove(pp); 3078 mvneta_mdio_remove(pp);
3064 unregister_cpu_notifier(&pp->cpu_notifier); 3079 unregister_cpu_notifier(&pp->cpu_notifier);
3065 for_each_present_cpu(cpu) 3080 /* Now that the notifier are unregistered, we can release le
3066 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 3081 * lock
3082 */
3083 spin_unlock(&pp->lock);
3084 on_each_cpu(mvneta_percpu_disable, pp, true);
3067 free_percpu_irq(dev->irq, pp->ports); 3085 free_percpu_irq(dev->irq, pp->ports);
3068 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
3069 mvneta_cleanup_txqs(pp); 3087 mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3312 3330
3313 netif_tx_stop_all_queues(pp->dev); 3331 netif_tx_stop_all_queues(pp->dev);
3314 3332
3315 for_each_online_cpu(cpu) 3333 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3316 smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3317 pp, true);
3318 3334
3319 /* We have to synchronise on the napi of each CPU */ 3335 /* We have to synchronise on the napi of each CPU */
3320 for_each_online_cpu(cpu) { 3336 for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3335 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 3351 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3336 3352
3337 /* Update the elected CPU matching the new rxq_def */ 3353 /* Update the elected CPU matching the new rxq_def */
3354 spin_lock(&pp->lock);
3338 mvneta_percpu_elect(pp); 3355 mvneta_percpu_elect(pp);
3356 spin_unlock(&pp->lock);
3339 3357
3340 /* We have to synchronise on the napi of each CPU */ 3358 /* We have to synchronise on the napi of each CPU */
3341 for_each_online_cpu(cpu) { 3359 for_each_online_cpu(cpu) {
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1fd46..c797971aefab 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3061 3061
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 if (!pe) 3063 if (!pe)
3064 return -1; 3064 return -ENOMEM;
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 pe->index = tid; 3066 pe->index = tid;
3067 3067
@@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3077 if (pmap == 0) { 3077 if (pmap == 0) {
3078 if (add) { 3078 if (add) {
3079 kfree(pe); 3079 kfree(pe);
3080 return -1; 3080 return -EINVAL;
3081 } 3081 }
3082 mvpp2_prs_hw_inv(priv, pe->index); 3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false; 3083 priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8affcc9..c7e939945259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
182 err = mlx4_reset_slave(dev); 182 err = mlx4_reset_slave(dev);
183 else 183 else
184 err = mlx4_reset_master(dev); 184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186 185
186 if (!err) {
187 mlx4_err(dev, "device was reset successfully\n");
188 } else {
189 /* EEH could have disabled the PCI channel during reset. That's
190 * recoverable and the PCI error flow will handle it.
191 */
192 if (!pci_channel_offline(dev->persist->pdev))
193 BUG_ON(1);
194 }
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; 195 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex); 196 mutex_unlock(&persist->device_state_mutex);
190 197
191 /* At that step HW was already reset, now notify clients */ 198 /* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..a849da92f857 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
318 if (timestamp_en) 318 if (timestamp_en)
319 cq_context->flags |= cpu_to_be32(1 << 19); 319 cq_context->flags |= cpu_to_be32(1 << 19);
320 320
321 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 321 cq_context->logsize_usrpage =
322 cpu_to_be32((ilog2(nent) << 24) |
323 mlx4_to_hw_uar_index(dev, uar->index));
322 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; 324 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
323 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
324 326
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
236 .enable = mlx4_en_phc_enable, 236 .enable = mlx4_en_phc_enable,
237}; 237};
238 238
239#define MLX4_EN_WRAP_AROUND_SEC 10ULL
240
241/* This function calculates the max shift that enables the user range
242 * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
243 */
244static u32 freq_to_shift(u16 freq)
245{
246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
250 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252
253 /* This comes from the reverse of clocksource_khz2mult */
254 return ilog2(div_u64(max_mul * freq_khz, 1000000));
255}
256
239void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) 257void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
240{ 258{
241 struct mlx4_dev *dev = mdev->dev; 259 struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
254 memset(&mdev->cycles, 0, sizeof(mdev->cycles)); 272 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
255 mdev->cycles.read = mlx4_en_read_clock; 273 mdev->cycles.read = mlx4_en_read_clock;
256 mdev->cycles.mask = CLOCKSOURCE_MASK(48); 274 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
257 /* Using shift to make calculation more accurate. Since current HW 275 mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
258 * clock frequency is 427 MHz, and cycles are given using a 48 bits
259 * register, the biggest shift when calculating using u64, is 14
260 * (max_cycles * multiplier < 2^64)
261 */
262 mdev->cycles.shift = 14;
263 mdev->cycles.mult = 276 mdev->cycles.mult =
264 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); 277 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
265 mdev->nominal_c_mult = mdev->cycles.mult; 278 mdev->nominal_c_mult = mdev->cycles.mult;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f69a73b..f191a1612589 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2344,8 +2344,6 @@ out:
2344 /* set offloads */ 2344 /* set offloads */
2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; 2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2347 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2348 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2349} 2347}
2350 2348
2351static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2349static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2356 /* unset offloads */ 2354 /* unset offloads */
2357 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2355 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2358 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); 2356 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2359 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2360 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2361 2357
2362 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2358 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2363 VXLAN_STEER_BY_OUTER_MAC, 0); 2359 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2980 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 2976 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2981 } 2977 }
2982 2978
2979 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2980 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2981 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2982 }
2983
2983 mdev->pndev[port] = dev; 2984 mdev->pndev[port] = dev;
2984 mdev->upper[port] = NULL; 2985 mdev->upper[port] = NULL;
2985 2986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
238 stats->collisions = 0; 238 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 241 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
243 stats->rx_frame_errors = 0; 243 stats->rx_frame_errors = 0;
244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
245 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 245 stats->rx_missed_errors = 0;
246 stats->tx_aborted_errors = 0; 246 stats->tx_aborted_errors = 0;
247 stats->tx_carrier_errors = 0; 247 stats->tx_carrier_errors = 0;
248 stats->tx_fifo_errors = 0; 248 stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a659d3..02e925d6f734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
58 } else { 58 } else {
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 } 60 }
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
62 mdev->priv_uar.index));
62 context->local_qpn = cpu_to_be32(qpn); 63 context->local_qpn = cpu_to_be32(qpn);
63 context->pri_path.ackto = 1 & 0x07; 64 context->pri_path.ackto = 1 & 0x07;
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 65 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e0946ab22010 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
214 ring->cqn, user_prio, &ring->context); 214 ring->cqn, user_prio, &ring->context);
215 if (ring->bf_alloced) 215 if (ring->bf_alloced)
216 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 216 ring->context.usr_page =
217 cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
218 ring->bf.uar->index));
217 219
218 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 220 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
219 &ring->qp, &ring->qp_state); 221 &ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053165f8..f613977455e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
940 940
941 if (!priv->eq_table.uar_map[index]) { 941 if (!priv->eq_table.uar_map[index]) {
942 priv->eq_table.uar_map[index] = 942 priv->eq_table.uar_map[index] =
943 ioremap(pci_resource_start(dev->persist->pdev, 2) + 943 ioremap(
944 ((eq->eqn / 4) << PAGE_SHIFT), 944 pci_resource_start(dev->persist->pdev, 2) +
945 PAGE_SIZE); 945 ((eq->eqn / 4) << (dev->uar_page_shift)),
946 (1 << (dev->uar_page_shift)));
946 if (!priv->eq_table.uar_map[index]) { 947 if (!priv->eq_table.uar_map[index]) {
947 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 948 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
948 eq->eqn); 949 eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d219e445..2cc3c626c3fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@ struct mlx4_port_config {
168 168
169static atomic_t pf_loading = ATOMIC_INIT(0); 169static atomic_t pf_loading = ATOMIC_INIT(0);
170 170
171static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
172 struct mlx4_dev_cap *dev_cap)
173{
174 /* The reserved_uars is calculated by system page size unit.
175 * Therefore, adjustment is added when the uar page size is less
176 * than the system page size
177 */
178 dev->caps.reserved_uars =
179 max_t(int,
180 mlx4_get_num_reserved_uar(dev),
181 dev_cap->reserved_uars /
182 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
183}
184
171int mlx4_check_port_params(struct mlx4_dev *dev, 185int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type) 186 enum mlx4_port_type *port_type)
173{ 187{
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 400 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 401 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
388 402
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds; 403 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 404 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0; 405 dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 417 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 418 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
407 419
420 /* Save uar page shift */
421 if (!mlx4_is_slave(dev)) {
422 /* Virtual PCI function needs to determine UAR page size from
423 * firmware. Only master PCI function can set the uar page size
424 */
425 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
426 mlx4_set_num_reserved_uars(dev, dev_cap);
427 }
428
408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 429 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 struct mlx4_init_hca_param hca_param; 430 struct mlx4_init_hca_param hca_param;
410 431
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
815 return -ENODEV; 836 return -ENODEV;
816 } 837 }
817 838
818 /* slave gets uar page size from QUERY_HCA fw command */ 839 /* Set uar_page_shift for VF */
819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 840 dev->uar_page_shift = hca_param.uar_page_sz + 12;
820 841
821 /* TODO: relax this assumption */ 842 /* Make sure the master uar page size is valid */
822 if (dev->caps.uar_page_size != PAGE_SIZE) { 843 if (dev->uar_page_shift > PAGE_SHIFT) {
823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 844 mlx4_err(dev,
824 dev->caps.uar_page_size, PAGE_SIZE); 845 "Invalid configuration: uar page size is larger than system page size\n");
825 return -ENODEV; 846 return -ENODEV;
826 } 847 }
827 848
849 /* Set reserved_uars based on the uar_page_shift */
850 mlx4_set_num_reserved_uars(dev, &dev_cap);
851
852 /* Although uar page size in FW differs from system page size,
853 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
854 * still works with assumption that uar page size == system page size
855 */
856 dev->caps.uar_page_size = PAGE_SIZE;
857
828 memset(&func_cap, 0, sizeof(func_cap)); 858 memset(&func_cap, 0, sizeof(func_cap));
829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 859 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 if (err) { 860 if (err) {
@@ -2179,8 +2209,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2179 2209
2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2210 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2181 2211
2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2212 /* Always set UAR page size 4KB, set log_uar_sz accordingly */
2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2213 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2214 PAGE_SHIFT -
2215 DEFAULT_UAR_PAGE_SHIFT;
2216 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2217
2184 init_hca.mw_enabled = 0; 2218 init_hca.mw_enabled = 0;
2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2219 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2220 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59dc854e..b3cc3ab63799 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
269 269
270int mlx4_init_uar_table(struct mlx4_dev *dev) 270int mlx4_init_uar_table(struct mlx4_dev *dev)
271{ 271{
272 if (dev->caps.num_uars <= 128) { 272 int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
273 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", 273
274 dev->caps.num_uars); 274 mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
275 mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
276
277 if (dev->caps.num_uars <= num_reserved_uar) {
278 mlx4_err(
279 dev, "Only %d UAR pages (need more than %d)\n",
280 dev->caps.num_uars, num_reserved_uar);
275 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); 281 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
276 return -ENODEV; 282 return -ENODEV;
277 } 283 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe29ef6c..25ce1b030a00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
915 915
916 spin_lock_irq(mlx4_tlock(dev)); 916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER); 917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) 918 if (!r || r->owner != slave) {
919 ret = -EINVAL; 919 ret = -EINVAL;
920 counter = container_of(r, struct res_counter, com); 920 } else {
921 if (!counter->port) 921 counter = container_of(r, struct res_counter, com);
922 counter->port = port; 922 if (!counter->port)
923 counter->port = port;
924 }
923 925
924 spin_unlock_irq(mlx4_tlock(dev)); 926 spin_unlock_irq(mlx4_tlock(dev));
925 return ret; 927 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430f1062..d4e1c3045200 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2024,18 +2024,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2024 vf_stats); 2024 vf_stats);
2025} 2025}
2026 2026
2027static struct net_device_ops mlx5e_netdev_ops = { 2027static const struct net_device_ops mlx5e_netdev_ops_basic = {
2028 .ndo_open = mlx5e_open, 2028 .ndo_open = mlx5e_open,
2029 .ndo_stop = mlx5e_close, 2029 .ndo_stop = mlx5e_close,
2030 .ndo_start_xmit = mlx5e_xmit, 2030 .ndo_start_xmit = mlx5e_xmit,
2031 .ndo_get_stats64 = mlx5e_get_stats, 2031 .ndo_get_stats64 = mlx5e_get_stats,
2032 .ndo_set_rx_mode = mlx5e_set_rx_mode, 2032 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2033 .ndo_set_mac_address = mlx5e_set_mac, 2033 .ndo_set_mac_address = mlx5e_set_mac,
2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2036 .ndo_set_features = mlx5e_set_features, 2036 .ndo_set_features = mlx5e_set_features,
2037 .ndo_change_mtu = mlx5e_change_mtu, 2037 .ndo_change_mtu = mlx5e_change_mtu,
2038 .ndo_do_ioctl = mlx5e_ioctl, 2038 .ndo_do_ioctl = mlx5e_ioctl,
2039};
2040
2041static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2042 .ndo_open = mlx5e_open,
2043 .ndo_stop = mlx5e_close,
2044 .ndo_start_xmit = mlx5e_xmit,
2045 .ndo_get_stats64 = mlx5e_get_stats,
2046 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2047 .ndo_set_mac_address = mlx5e_set_mac,
2048 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2049 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2050 .ndo_set_features = mlx5e_set_features,
2051 .ndo_change_mtu = mlx5e_change_mtu,
2052 .ndo_do_ioctl = mlx5e_ioctl,
2053 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2054 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
2055 .ndo_get_vf_config = mlx5e_get_vf_config,
2056 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2057 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2039}; 2058};
2040 2059
2041static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2060static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2137,18 +2156,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
2137 2156
2138 SET_NETDEV_DEV(netdev, &mdev->pdev->dev); 2157 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
2139 2158
2140 if (priv->params.num_tc > 1) 2159 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2141 mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; 2160 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
2142 2161 else
2143 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 2162 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
2144 mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
2145 mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
2146 mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
2147 mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
2148 mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
2149 }
2150 2163
2151 netdev->netdev_ops = &mlx5e_netdev_ops;
2152 netdev->watchdog_timeo = 15 * HZ; 2164 netdev->watchdog_timeo = 15 * HZ;
2153 2165
2154 netdev->ethtool_ops = &mlx5e_ethtool_ops; 2166 netdev->ethtool_ops = &mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f5435b32f..ae65b9940aed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
49#define MLXSW_PORT_MID 0xd000 49#define MLXSW_PORT_MID 0xd000
50 50
51#define MLXSW_PORT_MAX_PHY_PORTS 0x40 51#define MLXSW_PORT_MAX_PHY_PORTS 0x40
52#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS 52#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
53 53
54#define MLXSW_PORT_DEVID_BITS_OFFSET 10 54#define MLXSW_PORT_DEVID_BITS_OFFSET 10
55#define MLXSW_PORT_PHY_BITS_OFFSET 4 55#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bb77e2207804..ffe4c0305733 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
873 } 873 }
874} 874}
875 875
876/* SPAFT - Switch Port Acceptable Frame Types
877 * ------------------------------------------
878 * The Switch Port Acceptable Frame Types register configures the frame
879 * admittance of the port.
880 */
881#define MLXSW_REG_SPAFT_ID 0x2010
882#define MLXSW_REG_SPAFT_LEN 0x08
883
884static const struct mlxsw_reg_info mlxsw_reg_spaft = {
885 .id = MLXSW_REG_SPAFT_ID,
886 .len = MLXSW_REG_SPAFT_LEN,
887};
888
889/* reg_spaft_local_port
890 * Local port number.
891 * Access: Index
892 *
893 * Note: CPU port is not supported (all tag types are allowed).
894 */
895MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
896
897/* reg_spaft_sub_port
898 * Virtual port within the physical port.
899 * Should be set to 0 when virtual ports are not enabled on the port.
900 * Access: RW
901 */
902MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
903
904/* reg_spaft_allow_untagged
905 * When set, untagged frames on the ingress are allowed (default).
906 * Access: RW
907 */
908MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
909
910/* reg_spaft_allow_prio_tagged
911 * When set, priority tagged frames on the ingress are allowed (default).
912 * Access: RW
913 */
914MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
915
916/* reg_spaft_allow_tagged
917 * When set, tagged frames on the ingress are allowed (default).
918 * Access: RW
919 */
920MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
921
922static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
923 bool allow_untagged)
924{
925 MLXSW_REG_ZERO(spaft, payload);
926 mlxsw_reg_spaft_local_port_set(payload, local_port);
927 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
928 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
929 mlxsw_reg_spaft_allow_tagged_set(payload, true);
930}
931
876/* SFGC - Switch Flooding Group Configuration 932/* SFGC - Switch Flooding Group Configuration
877 * ------------------------------------------ 933 * ------------------------------------------
878 * The following register controls the association of flooding tables and MIDs 934 * The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3203 return "SPVID"; 3259 return "SPVID";
3204 case MLXSW_REG_SPVM_ID: 3260 case MLXSW_REG_SPVM_ID:
3205 return "SPVM"; 3261 return "SPVM";
3262 case MLXSW_REG_SPAFT_ID:
3263 return "SPAFT";
3206 case MLXSW_REG_SFGC_ID: 3264 case MLXSW_REG_SFGC_ID:
3207 return "SFGC"; 3265 return "SFGC";
3208 case MLXSW_REG_SFTR_ID: 3266 case MLXSW_REG_SFTR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 217856bdd400..09ce451c283b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2123,6 +2123,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2125 2125
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2127
2126 mlxsw_sp_port->learning = 0; 2128 mlxsw_sp_port->learning = 0;
2127 mlxsw_sp_port->learning_sync = 0; 2129 mlxsw_sp_port->learning_sync = 0;
2128 mlxsw_sp_port->uc_flood = 0; 2130 mlxsw_sp_port->uc_flood = 0;
@@ -2746,6 +2748,13 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2746 goto err_vport_flood_set; 2748 goto err_vport_flood_set;
2747 } 2749 }
2748 2750
2751 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2752 MLXSW_REG_SPMS_STATE_FORWARDING);
2753 if (err) {
2754 netdev_err(dev, "Failed to set STP state\n");
2755 goto err_port_stp_state_set;
2756 }
2757
2749 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 2758 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2750 netdev_err(dev, "Failed to flush FDB\n"); 2759 netdev_err(dev, "Failed to flush FDB\n");
2751 2760
@@ -2763,6 +2772,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2763 2772
2764 return 0; 2773 return 0;
2765 2774
2775err_port_stp_state_set:
2766err_vport_flood_set: 2776err_vport_flood_set:
2767err_port_vid_learning_set: 2777err_port_vid_learning_set:
2768err_port_vid_to_fid_validate: 2778err_port_vid_to_fid_validate:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 7f42eb1c320e..3b89ed2f3c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, 254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
255 bool set, bool only_uc); 255 bool set, bool only_uc);
256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
257int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
257 258
258#endif 259#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e492ca2cdecd..7b56098acc58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -370,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
370 return err; 370 return err;
371} 371}
372 372
373static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 373static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
374 u16 vid)
374{ 375{
375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 char spvid_pl[MLXSW_REG_SPVID_LEN]; 377 char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
379 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
380} 381}
381 382
383static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
384 bool allow)
385{
386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387 char spaft_pl[MLXSW_REG_SPAFT_LEN];
388
389 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
391}
392
393int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
394{
395 struct net_device *dev = mlxsw_sp_port->dev;
396 int err;
397
398 if (!vid) {
399 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
400 if (err) {
401 netdev_err(dev, "Failed to disallow untagged traffic\n");
402 return err;
403 }
404 } else {
405 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
406 if (err) {
407 netdev_err(dev, "Failed to set PVID\n");
408 return err;
409 }
410
411 /* Only allow if not already allowed. */
412 if (!mlxsw_sp_port->pvid) {
413 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
414 true);
415 if (err) {
416 netdev_err(dev, "Failed to allow untagged traffic\n");
417 goto err_port_allow_untagged_set;
418 }
419 }
420 }
421
422 mlxsw_sp_port->pvid = vid;
423 return 0;
424
425err_port_allow_untagged_set:
426 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
427 return err;
428}
429
382static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 430static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
383{ 431{
384 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 432 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
540 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 588 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
541 goto err_port_pvid_set; 589 goto err_port_pvid_set;
542 } 590 }
543 mlxsw_sp_port->pvid = vid_begin; 591 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
592 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
593 if (err) {
594 netdev_err(dev, "Unable to del PVID\n");
595 goto err_port_pvid_set;
596 }
544 } 597 }
545 598
546 /* Changing activity bits only if HW operation succeded */ 599 /* Changing activity bits only if HW operation succeded */
@@ -892,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
892 return err; 945 return err;
893 } 946 }
894 947
948 if (init)
949 goto out;
950
895 pvid = mlxsw_sp_port->pvid; 951 pvid = mlxsw_sp_port->pvid;
896 if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { 952 if (pvid >= vid_begin && pvid <= vid_end) {
897 /* Default VLAN is always 1 */ 953 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
898 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
899 if (err) { 954 if (err) {
900 netdev_err(dev, "Unable to del PVID %d\n", pvid); 955 netdev_err(dev, "Unable to del PVID %d\n", pvid);
901 return err; 956 return err;
902 } 957 }
903 mlxsw_sp_port->pvid = 1;
904 } 958 }
905 959
906 if (init)
907 goto out;
908
909 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 960 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
910 false, false); 961 false, false);
911 if (err) { 962 if (err) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571d0432..537974cfd427 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6137,28 +6137,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt; 6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
6138 sw_cnt_1ms_ini &= 0x0fff; 6138 sw_cnt_1ms_ini &= 0x0fff;
6139 data = r8168_mac_ocp_read(tp, 0xd412); 6139 data = r8168_mac_ocp_read(tp, 0xd412);
6140 data &= 0x0fff; 6140 data &= ~0x0fff;
6141 data |= sw_cnt_1ms_ini; 6141 data |= sw_cnt_1ms_ini;
6142 r8168_mac_ocp_write(tp, 0xd412, data); 6142 r8168_mac_ocp_write(tp, 0xd412, data);
6143 } 6143 }
6144 6144
6145 data = r8168_mac_ocp_read(tp, 0xe056); 6145 data = r8168_mac_ocp_read(tp, 0xe056);
6146 data &= 0xf0; 6146 data &= ~0xf0;
6147 data |= 0x07; 6147 data |= 0x70;
6148 r8168_mac_ocp_write(tp, 0xe056, data); 6148 r8168_mac_ocp_write(tp, 0xe056, data);
6149 6149
6150 data = r8168_mac_ocp_read(tp, 0xe052); 6150 data = r8168_mac_ocp_read(tp, 0xe052);
6151 data &= 0x8008; 6151 data &= ~0x6000;
6152 data |= 0x6000; 6152 data |= 0x8008;
6153 r8168_mac_ocp_write(tp, 0xe052, data); 6153 r8168_mac_ocp_write(tp, 0xe052, data);
6154 6154
6155 data = r8168_mac_ocp_read(tp, 0xe0d6); 6155 data = r8168_mac_ocp_read(tp, 0xe0d6);
6156 data &= 0x01ff; 6156 data &= ~0x01ff;
6157 data |= 0x017f; 6157 data |= 0x017f;
6158 r8168_mac_ocp_write(tp, 0xe0d6, data); 6158 r8168_mac_ocp_write(tp, 0xe0d6, data);
6159 6159
6160 data = r8168_mac_ocp_read(tp, 0xd420); 6160 data = r8168_mac_ocp_read(tp, 0xd420);
6161 data &= 0x0fff; 6161 data &= ~0x0fff;
6162 data |= 0x047f; 6162 data |= 0x047f;
6163 r8168_mac_ocp_write(tp, 0xd420, data); 6163 r8168_mac_ocp_write(tp, 0xd420, data);
6164 6164
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed914fcf..744d7806a9ee 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1139 if (netif_running(ndev)) { 1139 if (netif_running(ndev)) {
1140 netif_device_detach(ndev); 1140 netif_device_detach(ndev);
1141 /* Stop PTP Clock driver */ 1141 /* Stop PTP Clock driver */
1142 ravb_ptp_stop(ndev); 1142 if (priv->chip_id == RCAR_GEN2)
1143 ravb_ptp_stop(ndev);
1143 /* Wait for DMA stopping */ 1144 /* Wait for DMA stopping */
1144 error = ravb_stop_dma(ndev); 1145 error = ravb_stop_dma(ndev);
1145 if (error) { 1146 if (error) {
@@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1170 ravb_emac_init(ndev); 1171 ravb_emac_init(ndev);
1171 1172
1172 /* Initialise PTP Clock driver */ 1173 /* Initialise PTP Clock driver */
1173 ravb_ptp_init(ndev, priv->pdev); 1174 if (priv->chip_id == RCAR_GEN2)
1175 ravb_ptp_init(ndev, priv->pdev);
1174 1176
1175 netif_device_attach(ndev); 1177 netif_device_attach(ndev);
1176 } 1178 }
@@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1298 netif_tx_stop_all_queues(ndev); 1300 netif_tx_stop_all_queues(ndev);
1299 1301
1300 /* Stop PTP Clock driver */ 1302 /* Stop PTP Clock driver */
1301 ravb_ptp_stop(ndev); 1303 if (priv->chip_id == RCAR_GEN2)
1304 ravb_ptp_stop(ndev);
1302 1305
1303 /* Wait for DMA stopping */ 1306 /* Wait for DMA stopping */
1304 ravb_stop_dma(ndev); 1307 ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1311 ravb_emac_init(ndev); 1314 ravb_emac_init(ndev);
1312 1315
1313 /* Initialise PTP Clock driver */ 1316 /* Initialise PTP Clock driver */
1314 ravb_ptp_init(ndev, priv->pdev); 1317 if (priv->chip_id == RCAR_GEN2)
1318 ravb_ptp_init(ndev, priv->pdev);
1315 1319
1316 netif_tx_start_all_queues(ndev); 1320 netif_tx_start_all_queues(ndev);
1317} 1321}
@@ -1814,10 +1818,6 @@ static int ravb_probe(struct platform_device *pdev)
1814 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); 1818 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
1815 } 1819 }
1816 1820
1817 /* Set CSEL value */
1818 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1819 CCC);
1820
1821 /* Set GTI value */ 1821 /* Set GTI value */
1822 error = ravb_set_gti(ndev); 1822 error = ravb_set_gti(ndev);
1823 if (error) 1823 if (error)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a844ab..db7db8ac4ca3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev)
2342 } 2342 }
2343 2343
2344 ndev->irq = platform_get_irq(pdev, 0); 2344 ndev->irq = platform_get_irq(pdev, 0);
2345 if (ndev->irq <= 0) { 2345 if (ndev->irq < 0) {
2346 ret = -ENODEV; 2346 ret = ndev->irq;
2347 goto out_release_io; 2347 goto out_release_io;
2348 } 2348 }
2349 /* 2349 /*
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e1ec74..c3e85acfdc70 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
63 mode = AM33XX_GMII_SEL_MODE_RGMII; 63 mode = AM33XX_GMII_SEL_MODE_RGMII;
64 break; 64 break;
65 65
66 case PHY_INTERFACE_MODE_MII:
67 default: 66 default:
67 dev_warn(priv->dev,
68 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
69 phy_modes(phy_mode));
70 /* fallthrough */
71 case PHY_INTERFACE_MODE_MII:
68 mode = AM33XX_GMII_SEL_MODE_MII; 72 mode = AM33XX_GMII_SEL_MODE_MII;
69 break; 73 break;
70 }; 74 };
@@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
106 mode = AM33XX_GMII_SEL_MODE_RGMII; 110 mode = AM33XX_GMII_SEL_MODE_RGMII;
107 break; 111 break;
108 112
109 case PHY_INTERFACE_MODE_MII:
110 default: 113 default:
114 dev_warn(priv->dev,
115 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
116 phy_modes(phy_mode));
117 /* fallthrough */
118 case PHY_INTERFACE_MODE_MII:
111 mode = AM33XX_GMII_SEL_MODE_MII; 119 mode = AM33XX_GMII_SEL_MODE_MII;
112 break; 120 break;
113 }; 121 };
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
117 *ndesc = le32_to_cpu(desc->next_desc); 117 *ndesc = le32_to_cpu(desc->next_desc);
118} 118}
119 119
120static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120static u32 get_sw_data(int index, struct knav_dma_desc *desc)
121{ 121{
122 *pad0 = le32_to_cpu(desc->pad[0]); 122 /* No Endian conversion needed as this data is untouched by hw */
123 *pad1 = le32_to_cpu(desc->pad[1]); 123 return desc->sw_data[index];
124 *pad2 = le32_to_cpu(desc->pad[2]);
125} 124}
126 125
127static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 126/* use these macros to get sw data */
128{ 127#define GET_SW_DATA0(desc) get_sw_data(0, desc)
129 u64 pad64; 128#define GET_SW_DATA1(desc) get_sw_data(1, desc)
130 129#define GET_SW_DATA2(desc) get_sw_data(2, desc)
131 pad64 = le32_to_cpu(desc->pad[0]) + 130#define GET_SW_DATA3(desc) get_sw_data(3, desc)
132 ((u64)le32_to_cpu(desc->pad[1]) << 32);
133 *padptr = (void *)(uintptr_t)pad64;
134}
135 131
136static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, 132static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
137 struct knav_dma_desc *desc) 133 struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
163 desc->packet_info = cpu_to_le32(pkt_info); 159 desc->packet_info = cpu_to_le32(pkt_info);
164} 160}
165 161
166static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 162static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
167{ 163{
168 desc->pad[0] = cpu_to_le32(pad0); 164 /* No Endian conversion needed as this data is untouched by hw */
169 desc->pad[1] = cpu_to_le32(pad1); 165 desc->sw_data[index] = data;
170 desc->pad[2] = cpu_to_le32(pad1);
171} 166}
172 167
168/* use these macros to set sw data */
169#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
170#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
171#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
172#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
173
173static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, 174static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
174 struct knav_dma_desc *desc) 175 struct knav_dma_desc *desc)
175{ 176{
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
581 dma_addr_t dma_desc, dma_buf; 582 dma_addr_t dma_desc, dma_buf;
582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 unsigned int buf_len, dma_sz = sizeof(*ndesc);
583 void *buf_ptr; 584 void *buf_ptr;
584 u32 pad[2];
585 u32 tmp; 585 u32 tmp;
586 586
587 get_words(&dma_desc, 1, &desc->next_desc); 587 get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
593 break; 593 break;
594 } 594 }
595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
596 get_pad_ptr(&buf_ptr, ndesc); 596 /* warning!!!! We are retrieving the virtual ptr in the sw_data
597 * field as a 32bit value. Will not work on 64bit machines
598 */
599 buf_ptr = (void *)GET_SW_DATA0(ndesc);
600 buf_len = (int)GET_SW_DATA1(desc);
597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 601 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
598 __free_page(buf_ptr); 602 __free_page(buf_ptr);
599 knav_pool_desc_put(netcp->rx_pool, desc); 603 knav_pool_desc_put(netcp->rx_pool, desc);
600 } 604 }
601 605 /* warning!!!! We are retrieving the virtual ptr in the sw_data
602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 606 * field as a 32bit value. Will not work on 64bit machines
603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 607 */
608 buf_ptr = (void *)GET_SW_DATA0(desc);
609 buf_len = (int)GET_SW_DATA1(desc);
604 610
605 if (buf_ptr) 611 if (buf_ptr)
606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 612 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
639 dma_addr_t dma_desc, dma_buff; 645 dma_addr_t dma_desc, dma_buff;
640 struct netcp_packet p_info; 646 struct netcp_packet p_info;
641 struct sk_buff *skb; 647 struct sk_buff *skb;
642 u32 pad[2];
643 void *org_buf_ptr; 648 void *org_buf_ptr;
644 649
645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 650 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
653 } 658 }
654 659
655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 660 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 661 /* warning!!!! We are retrieving the virtual ptr in the sw_data
657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 662 * field as a 32bit value. Will not work on 64bit machines
663 */
664 org_buf_ptr = (void *)GET_SW_DATA0(desc);
665 org_buf_len = (int)GET_SW_DATA1(desc);
658 666
659 if (unlikely(!org_buf_ptr)) { 667 if (unlikely(!org_buf_ptr)) {
660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 668 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
679 /* Fill in the page fragment list */ 687 /* Fill in the page fragment list */
680 while (dma_desc) { 688 while (dma_desc) {
681 struct page *page; 689 struct page *page;
682 void *ptr;
683 690
684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 691 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
685 if (unlikely(!ndesc)) { 692 if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
688 } 695 }
689 696
690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 697 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
691 get_pad_ptr(&ptr, ndesc); 698 /* warning!!!! We are retrieving the virtual ptr in the sw_data
692 page = ptr; 699 * field as a 32bit value. Will not work on 64bit machines
700 */
701 page = (struct page *)GET_SW_DATA0(desc);
693 702
694 if (likely(dma_buff && buf_len && page)) { 703 if (likely(dma_buff && buf_len && page)) {
695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 704 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
777 } 786 }
778 787
779 get_org_pkt_info(&dma, &buf_len, desc); 788 get_org_pkt_info(&dma, &buf_len, desc);
780 get_pad_ptr(&buf_ptr, desc); 789 /* warning!!!! We are retrieving the virtual ptr in the sw_data
790 * field as a 32bit value. Will not work on 64bit machines
791 */
792 buf_ptr = (void *)GET_SW_DATA0(desc);
781 793
782 if (unlikely(!dma)) { 794 if (unlikely(!dma)) {
783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 795 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
829 struct page *page; 841 struct page *page;
830 dma_addr_t dma; 842 dma_addr_t dma;
831 void *bufptr; 843 void *bufptr;
832 u32 pad[3]; 844 u32 sw_data[2];
833 845
834 /* Allocate descriptor */ 846 /* Allocate descriptor */
835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 847 hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
847 859
848 bufptr = netdev_alloc_frag(primary_buf_len); 860 bufptr = netdev_alloc_frag(primary_buf_len);
849 pad[2] = primary_buf_len; 861 sw_data[1] = primary_buf_len;
850 862
851 if (unlikely(!bufptr)) { 863 if (unlikely(!bufptr)) {
852 dev_warn_ratelimited(netcp->ndev_dev, 864 dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 870 if (unlikely(dma_mapping_error(netcp->dev, dma)))
859 goto fail; 871 goto fail;
860 872
861 pad[0] = lower_32_bits((uintptr_t)bufptr); 873 /* warning!!!! We are saving the virtual ptr in the sw_data
862 pad[1] = upper_32_bits((uintptr_t)bufptr); 874 * field as a 32bit value. Will not work on 64bit machines
863 875 */
876 sw_data[0] = (u32)bufptr;
864 } else { 877 } else {
865 /* Allocate a secondary receive queue entry */ 878 /* Allocate a secondary receive queue entry */
866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 879 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
870 } 883 }
871 buf_len = PAGE_SIZE; 884 buf_len = PAGE_SIZE;
872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 885 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
873 pad[0] = lower_32_bits(dma); 886 /* warning!!!! We are saving the virtual ptr in the sw_data
874 pad[1] = upper_32_bits(dma); 887 * field as a 32bit value. Will not work on 64bit machines
875 pad[2] = 0; 888 */
889 sw_data[0] = (u32)page;
890 sw_data[1] = 0;
876 } 891 }
877 892
878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 893 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 897 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
883 KNAV_DMA_DESC_RETQ_SHIFT; 898 KNAV_DMA_DESC_RETQ_SHIFT;
884 set_org_pkt_info(dma, buf_len, hwdesc); 899 set_org_pkt_info(dma, buf_len, hwdesc);
885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 900 SET_SW_DATA0(sw_data[0], hwdesc);
901 SET_SW_DATA1(sw_data[1], hwdesc);
886 set_desc_info(desc_info, pkt_info, hwdesc); 902 set_desc_info(desc_info, pkt_info, hwdesc);
887 903
888 /* Push to FDQs */ 904 /* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
971 unsigned int budget) 987 unsigned int budget)
972{ 988{
973 struct knav_dma_desc *desc; 989 struct knav_dma_desc *desc;
974 void *ptr;
975 struct sk_buff *skb; 990 struct sk_buff *skb;
976 unsigned int dma_sz; 991 unsigned int dma_sz;
977 dma_addr_t dma; 992 dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
988 continue; 1003 continue;
989 } 1004 }
990 1005
991 get_pad_ptr(&ptr, desc); 1006 /* warning!!!! We are retrieving the virtual ptr in the sw_data
992 skb = ptr; 1007 * field as a 32bit value. Will not work on 64bit machines
1008 */
1009 skb = (struct sk_buff *)GET_SW_DATA0(desc);
993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 1010 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
994 if (!skb) { 1011 if (!skb) {
995 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); 1012 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1194 } 1211 }
1195 1212
1196 set_words(&tmp, 1, &desc->packet_info); 1213 set_words(&tmp, 1, &desc->packet_info);
1197 tmp = lower_32_bits((uintptr_t)&skb); 1214 /* warning!!!! We are saving the virtual ptr in the sw_data
1198 set_words(&tmp, 1, &desc->pad[0]); 1215 * field as a 32bit value. Will not work on 64bit machines
1199 tmp = upper_32_bits((uintptr_t)&skb); 1216 */
1200 set_words(&tmp, 1, &desc->pad[1]); 1217 SET_SW_DATA0((u32)skb, desc);
1201 1218
1202 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1219 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
1203 tmp = tx_pipe->switch_to_port; 1220 tmp = tx_pipe->switch_to_port;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 028e3873c310..0bf7edd99573 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,17 +1039,34 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1039 return geneve_xmit_skb(skb, dev, info); 1039 return geneve_xmit_skb(skb, dev, info);
1040} 1040}
1041 1041
1042static int geneve_change_mtu(struct net_device *dev, int new_mtu) 1042static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1043{ 1043{
1044 /* GENEVE overhead is not fixed, so we can't enforce a more 1044 /* The max_mtu calculation does not take account of GENEVE
1045 * precise max MTU. 1045 * options, to avoid excluding potentially valid
1046 * configurations.
1046 */ 1047 */
1047 if (new_mtu < 68 || new_mtu > IP_MAX_MTU) 1048 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
1049 - dev->hard_header_len;
1050
1051 if (new_mtu < 68)
1048 return -EINVAL; 1052 return -EINVAL;
1053
1054 if (new_mtu > max_mtu) {
1055 if (strict)
1056 return -EINVAL;
1057
1058 new_mtu = max_mtu;
1059 }
1060
1049 dev->mtu = new_mtu; 1061 dev->mtu = new_mtu;
1050 return 0; 1062 return 0;
1051} 1063}
1052 1064
1065static int geneve_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 return __geneve_change_mtu(dev, new_mtu, true);
1068}
1069
1053static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1070static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1054{ 1071{
1055 struct ip_tunnel_info *info = skb_tunnel_info(skb); 1072 struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1161,6 +1178,7 @@ static void geneve_setup(struct net_device *dev)
1161 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1178 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1162 1179
1163 netif_keep_dst(dev); 1180 netif_keep_dst(dev);
1181 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1164 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 1182 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
1165 eth_hw_addr_random(dev); 1183 eth_hw_addr_random(dev);
1166} 1184}
@@ -1452,14 +1470,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1452 return dev; 1470 return dev;
1453 1471
1454 err = geneve_configure(net, dev, &geneve_remote_unspec, 1472 err = geneve_configure(net, dev, &geneve_remote_unspec,
1455 0, 0, 0, htons(dst_port), true, 0); 1473 0, 0, 0, htons(dst_port), true,
1474 GENEVE_F_UDP_ZERO_CSUM6_RX);
1456 if (err) 1475 if (err)
1457 goto err; 1476 goto err;
1458 1477
1459 /* openvswitch users expect packet sizes to be unrestricted, 1478 /* openvswitch users expect packet sizes to be unrestricted,
1460 * so set the largest MTU we can. 1479 * so set the largest MTU we can.
1461 */ 1480 */
1462 err = geneve_change_mtu(dev, IP_MAX_MTU); 1481 err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
1463 if (err) 1482 if (err)
1464 goto err; 1483 goto err;
1465 1484
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1d3a66563bac..98e34fee45c7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1089,6 +1089,9 @@ static int netvsc_probe(struct hv_device *dev,
1089 net->ethtool_ops = &ethtool_ops; 1089 net->ethtool_ops = &ethtool_ops;
1090 SET_NETDEV_DEV(net, &dev->device); 1090 SET_NETDEV_DEV(net, &dev->device);
1091 1091
1092 /* We always need headroom for rndis header */
1093 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1094
1092 /* Notify the netvsc driver of the new device */ 1095 /* Notify the netvsc driver of the new device */
1093 memset(&device_info, 0, sizeof(device_info)); 1096 memset(&device_info, 0, sizeof(device_info));
1094 device_info.ring_size = ring_size; 1097 device_info.ring_size = ring_size;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3ec5e5..db507e3bcab9 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); 250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
251 phy_read(phydev, MII_BCM7XXX_AUX_MODE); 251 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
252 252
253 /* Workaround only required for 100Mbits/sec capable PHYs */
254 if (phydev->supported & PHY_GBIT_FEATURES)
255 return 0;
256
257 /* set shadow mode 2 */ 253 /* set shadow mode 2 */
258 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 254 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
259 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2); 255 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
270 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); 266 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
271 267
272 /* reset shadow mode 2 */ 268 /* reset shadow mode 2 */
273 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); 269 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
274 if (ret < 0) 270 if (ret < 0)
275 return ret; 271 return ret;
276 272
@@ -307,11 +303,6 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
307 return 0; 303 return 0;
308} 304}
309 305
310static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
311{
312 return 0;
313}
314
315#define BCM7XXX_28NM_GPHY(_oui, _name) \ 306#define BCM7XXX_28NM_GPHY(_oui, _name) \
316{ \ 307{ \
317 .phy_id = (_oui), \ 308 .phy_id = (_oui), \
@@ -337,7 +328,7 @@ static struct phy_driver bcm7xxx_driver[] = {
337 .phy_id = PHY_ID_BCM7425, 328 .phy_id = PHY_ID_BCM7425,
338 .phy_id_mask = 0xfffffff0, 329 .phy_id_mask = 0xfffffff0,
339 .name = "Broadcom BCM7425", 330 .name = "Broadcom BCM7425",
340 .features = PHY_GBIT_FEATURES | 331 .features = PHY_BASIC_FEATURES |
341 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 332 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
342 .flags = PHY_IS_INTERNAL, 333 .flags = PHY_IS_INTERNAL,
343 .config_init = bcm7xxx_config_init, 334 .config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@ static struct phy_driver bcm7xxx_driver[] = {
349 .phy_id = PHY_ID_BCM7429, 340 .phy_id = PHY_ID_BCM7429,
350 .phy_id_mask = 0xfffffff0, 341 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM7429", 342 .name = "Broadcom BCM7429",
352 .features = PHY_GBIT_FEATURES | 343 .features = PHY_BASIC_FEATURES |
353 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 344 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
354 .flags = PHY_IS_INTERNAL, 345 .flags = PHY_IS_INTERNAL,
355 .config_init = bcm7xxx_config_init, 346 .config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@ static struct phy_driver bcm7xxx_driver[] = {
361 .phy_id = PHY_ID_BCM7435, 352 .phy_id = PHY_ID_BCM7435,
362 .phy_id_mask = 0xfffffff0, 353 .phy_id_mask = 0xfffffff0,
363 .name = "Broadcom BCM7435", 354 .name = "Broadcom BCM7435",
364 .features = PHY_GBIT_FEATURES | 355 .features = PHY_BASIC_FEATURES |
365 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 356 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
366 .flags = PHY_IS_INTERNAL, 357 .flags = PHY_IS_INTERNAL,
367 .config_init = bcm7xxx_config_init, 358 .config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@ static struct phy_driver bcm7xxx_driver[] = {
369 .read_status = genphy_read_status, 360 .read_status = genphy_read_status,
370 .suspend = bcm7xxx_suspend, 361 .suspend = bcm7xxx_suspend,
371 .resume = bcm7xxx_config_init, 362 .resume = bcm7xxx_config_init,
372}, {
373 .phy_id = PHY_BCM_OUI_4,
374 .phy_id_mask = 0xffff0000,
375 .name = "Broadcom BCM7XXX 40nm",
376 .features = PHY_GBIT_FEATURES |
377 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
378 .flags = PHY_IS_INTERNAL,
379 .config_init = bcm7xxx_config_init,
380 .config_aneg = genphy_config_aneg,
381 .read_status = genphy_read_status,
382 .suspend = bcm7xxx_suspend,
383 .resume = bcm7xxx_config_init,
384}, {
385 .phy_id = PHY_BCM_OUI_5,
386 .phy_id_mask = 0xffffff00,
387 .name = "Broadcom BCM7XXX 65nm",
388 .features = PHY_BASIC_FEATURES |
389 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
390 .flags = PHY_IS_INTERNAL,
391 .config_init = bcm7xxx_dummy_config_init,
392 .config_aneg = genphy_config_aneg,
393 .read_status = genphy_read_status,
394 .suspend = bcm7xxx_suspend,
395 .resume = bcm7xxx_config_init,
396} }; 363} };
397 364
398static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 365static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
404 { PHY_ID_BCM7439, 0xfffffff0, }, 371 { PHY_ID_BCM7439, 0xfffffff0, },
405 { PHY_ID_BCM7435, 0xfffffff0, }, 372 { PHY_ID_BCM7435, 0xfffffff0, },
406 { PHY_ID_BCM7445, 0xfffffff0, }, 373 { PHY_ID_BCM7445, 0xfffffff0, },
407 { PHY_BCM_OUI_4, 0xffff0000 },
408 { PHY_BCM_OUI_5, 0xffffff00 },
409 { } 374 { }
410}; 375};
411 376
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb96443c97..ab1d0fcaf1d9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
446 if (err < 0) 446 if (err < 0)
447 return err; 447 return err;
448 448
449 return 0;
450}
451
452static int marvell_config_init(struct phy_device *phydev)
453{
454 /* Set registers from marvell,reg-init DT property */
449 return marvell_of_reg_init(phydev); 455 return marvell_of_reg_init(phydev);
450} 456}
451 457
@@ -495,7 +501,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
495 501
496 mdelay(500); 502 mdelay(500);
497 503
498 return 0; 504 return marvell_config_init(phydev);
499} 505}
500 506
501static int m88e3016_config_init(struct phy_device *phydev) 507static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
514 if (reg < 0) 520 if (reg < 0)
515 return reg; 521 return reg;
516 522
517 return 0; 523 return marvell_config_init(phydev);
518} 524}
519 525
520static int m88e1111_config_init(struct phy_device *phydev) 526static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@ static struct phy_driver marvell_drivers[] = {
1078 .features = PHY_GBIT_FEATURES, 1084 .features = PHY_GBIT_FEATURES,
1079 .probe = marvell_probe, 1085 .probe = marvell_probe,
1080 .flags = PHY_HAS_INTERRUPT, 1086 .flags = PHY_HAS_INTERRUPT,
1087 .config_init = &marvell_config_init,
1081 .config_aneg = &marvell_config_aneg, 1088 .config_aneg = &marvell_config_aneg,
1082 .read_status = &genphy_read_status, 1089 .read_status = &genphy_read_status,
1083 .ack_interrupt = &marvell_ack_interrupt, 1090 .ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@ static struct phy_driver marvell_drivers[] = {
1149 .features = PHY_GBIT_FEATURES, 1156 .features = PHY_GBIT_FEATURES,
1150 .flags = PHY_HAS_INTERRUPT, 1157 .flags = PHY_HAS_INTERRUPT,
1151 .probe = marvell_probe, 1158 .probe = marvell_probe,
1159 .config_init = &marvell_config_init,
1152 .config_aneg = &m88e1121_config_aneg, 1160 .config_aneg = &m88e1121_config_aneg,
1153 .read_status = &marvell_read_status, 1161 .read_status = &marvell_read_status,
1154 .ack_interrupt = &marvell_ack_interrupt, 1162 .ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@ static struct phy_driver marvell_drivers[] = {
1167 .features = PHY_GBIT_FEATURES, 1175 .features = PHY_GBIT_FEATURES,
1168 .flags = PHY_HAS_INTERRUPT, 1176 .flags = PHY_HAS_INTERRUPT,
1169 .probe = marvell_probe, 1177 .probe = marvell_probe,
1178 .config_init = &marvell_config_init,
1170 .config_aneg = &m88e1318_config_aneg, 1179 .config_aneg = &m88e1318_config_aneg,
1171 .read_status = &marvell_read_status, 1180 .read_status = &marvell_read_status,
1172 .ack_interrupt = &marvell_ack_interrupt, 1181 .ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@ static struct phy_driver marvell_drivers[] = {
1259 .features = PHY_GBIT_FEATURES, 1268 .features = PHY_GBIT_FEATURES,
1260 .flags = PHY_HAS_INTERRUPT, 1269 .flags = PHY_HAS_INTERRUPT,
1261 .probe = marvell_probe, 1270 .probe = marvell_probe,
1271 .config_init = &marvell_config_init,
1262 .config_aneg = &m88e1510_config_aneg, 1272 .config_aneg = &m88e1510_config_aneg,
1263 .read_status = &marvell_read_status, 1273 .read_status = &marvell_read_status,
1264 .ack_interrupt = &marvell_ack_interrupt, 1274 .ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@ static struct phy_driver marvell_drivers[] = {
1277 .features = PHY_GBIT_FEATURES, 1287 .features = PHY_GBIT_FEATURES,
1278 .flags = PHY_HAS_INTERRUPT, 1288 .flags = PHY_HAS_INTERRUPT,
1279 .probe = marvell_probe, 1289 .probe = marvell_probe,
1290 .config_init = &marvell_config_init,
1280 .config_aneg = &m88e1510_config_aneg, 1291 .config_aneg = &m88e1510_config_aneg,
1281 .read_status = &marvell_read_status, 1292 .read_status = &marvell_read_status,
1282 .ack_interrupt = &marvell_ack_interrupt, 1293 .ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f005faee..e551f3a89cfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@ int genphy_config_init(struct phy_device *phydev)
1410 1410
1411 features = (SUPPORTED_TP | SUPPORTED_MII 1411 features = (SUPPORTED_TP | SUPPORTED_MII
1412 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1412 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1413 SUPPORTED_BNC); 1413 SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1414 1414
1415 /* Do we support autonegotiation? */ 1415 /* Do we support autonegotiation? */
1416 val = phy_read(phydev, MII_BMSR); 1416 val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c63022eb3c..4ddae8118c85 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
395 395
396 if (!__pppoe_xmit(sk_pppox(relay_po), skb)) 396 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
397 goto abort_put; 397 goto abort_put;
398
399 sock_put(sk_pppox(relay_po));
398 } else { 400 } else {
399 if (sock_queue_rcv_skb(sk, skb)) 401 if (sock_queue_rcv_skb(sk, skb))
400 goto abort_kfree; 402 goto abort_kfree;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..cdde59089f72 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880791fc..570deef53f74 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@ static const struct usb_device_id products[] = {
637 637
638 /* 3. Combined interface devices matching on interface number */ 638 /* 3. Combined interface devices matching on interface number */
639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
640 {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
640 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, 641 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
641 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, 642 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
642 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, 643 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a53025fd0..72ba8ae7f09a 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@ union Vmxnet3_GenericDesc {
377#define VMXNET3_TX_RING_MAX_SIZE 4096 377#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 378#define VMXNET3_TC_RING_MAX_SIZE 4096
379#define VMXNET3_RX_RING_MAX_SIZE 4096 379#define VMXNET3_RX_RING_MAX_SIZE 4096
380#define VMXNET3_RX_RING2_MAX_SIZE 2048 380#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 381#define VMXNET3_RC_RING_MAX_SIZE 8192
382 382
383/* a list of reasons for queue stop */ 383/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c0f8aa..729c344e6774 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a31cd954b308..e6944b29588e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2171,9 +2171,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2171#endif 2171#endif
2172 } 2172 }
2173 2173
2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA && 2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2175 info && info->mode & IP_TUNNEL_INFO_TX) { 2175 if (info && info->mode & IP_TUNNEL_INFO_TX)
2176 vxlan_xmit_one(skb, dev, NULL, false); 2176 vxlan_xmit_one(skb, dev, NULL, false);
2177 else
2178 kfree_skb(skb);
2177 return NETDEV_TX_OK; 2179 return NETDEV_TX_OK;
2178 } 2180 }
2179 2181
@@ -2537,6 +2539,7 @@ static void vxlan_setup(struct net_device *dev)
2537 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2539 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2538 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2540 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2539 netif_keep_dst(dev); 2541 netif_keep_dst(dev);
2542 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2540 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 2543 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2541 2544
2542 INIT_LIST_HEAD(&vxlan->next); 2545 INIT_LIST_HEAD(&vxlan->next);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407208b1..629225980463 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@ try:
1626 if (state & Xpr) { 1626 if (state & Xpr) {
1627 void __iomem *scc_addr; 1627 void __iomem *scc_addr;
1628 unsigned long ring; 1628 unsigned long ring;
1629 int i; 1629 unsigned int i;
1630 1630
1631 /* 1631 /*
1632 * - the busy condition happens (sometimes); 1632 * - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 866067789330..7438fbeef744 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@ config IWLWIFI_LEDS
53 53
54config IWLDVM 54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support" 55 tristate "Intel Wireless WiFi DVM Firmware support"
56 depends on m
57 help 56 help
58 This is the driver that supports the DVM firmware. The list 57 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here: 58 of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a0299d43e..bce9b3420a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
70 71
71/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 20
74#define IWL8265_UCODE_API_MAX 20
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
76 79
77/* Lowest firmware API version supported */ 80/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 13 81#define IWL8000_UCODE_API_MIN 13
82#define IWL8265_UCODE_API_MIN 20
79 83
80/* NVM versions */ 84/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 85#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
93#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 99
100#define IWL8265_FW_PRE "iwlwifi-8265-"
101#define IWL8265_MODULE_FIRMWARE(api) \
102 IWL8265_FW_PRE __stringify(api) ".ucode"
103
96#define NVM_HW_SECTION_NUM_FAMILY_8000 10 104#define NVM_HW_SECTION_NUM_FAMILY_8000 10
97#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" 105#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
98#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" 106#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
144 .support_tx_backoff = true, 152 .support_tx_backoff = true,
145}; 153};
146 154
147#define IWL_DEVICE_8000 \ 155#define IWL_DEVICE_8000_COMMON \
148 .ucode_api_max = IWL8000_UCODE_API_MAX, \
149 .ucode_api_ok = IWL8000_UCODE_API_OK, \
150 .ucode_api_min = IWL8000_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_8000, \ 156 .device_family = IWL_DEVICE_FAMILY_8000, \
152 .max_inst_size = IWL60_RTC_INST_SIZE, \ 157 .max_inst_size = IWL60_RTC_INST_SIZE, \
153 .max_data_size = IWL60_RTC_DATA_SIZE, \ 158 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@ static const struct iwl_tt_params iwl8000_tt_params = {
167 .thermal_params = &iwl8000_tt_params, \ 172 .thermal_params = &iwl8000_tt_params, \
168 .apmg_not_supported = true 173 .apmg_not_supported = true
169 174
175#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \
180
181#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \
186
187#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \
192
170const struct iwl_cfg iwl8260_2n_cfg = { 193const struct iwl_cfg iwl8260_2n_cfg = {
171 .name = "Intel(R) Dual Band Wireless N 8260", 194 .name = "Intel(R) Dual Band Wireless N 8260",
172 .fw_name_pre = IWL8000_FW_PRE, 195 .fw_name_pre = IWL8000_FW_PRE,
173 IWL_DEVICE_8000, 196 IWL_DEVICE_8260,
174 .ht_params = &iwl8000_ht_params, 197 .ht_params = &iwl8000_ht_params,
175 .nvm_ver = IWL8000_NVM_VERSION, 198 .nvm_ver = IWL8000_NVM_VERSION,
176 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 199 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@ const struct iwl_cfg iwl8260_2n_cfg = {
179const struct iwl_cfg iwl8260_2ac_cfg = { 202const struct iwl_cfg iwl8260_2ac_cfg = {
180 .name = "Intel(R) Dual Band Wireless AC 8260", 203 .name = "Intel(R) Dual Band Wireless AC 8260",
181 .fw_name_pre = IWL8000_FW_PRE, 204 .fw_name_pre = IWL8000_FW_PRE,
182 IWL_DEVICE_8000, 205 IWL_DEVICE_8260,
183 .ht_params = &iwl8000_ht_params, 206 .ht_params = &iwl8000_ht_params,
184 .nvm_ver = IWL8000_NVM_VERSION, 207 .nvm_ver = IWL8000_NVM_VERSION,
185 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 208 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
188 211
189const struct iwl_cfg iwl8265_2ac_cfg = { 212const struct iwl_cfg iwl8265_2ac_cfg = {
190 .name = "Intel(R) Dual Band Wireless AC 8265", 213 .name = "Intel(R) Dual Band Wireless AC 8265",
191 .fw_name_pre = IWL8000_FW_PRE, 214 .fw_name_pre = IWL8265_FW_PRE,
192 IWL_DEVICE_8000, 215 IWL_DEVICE_8265,
193 .ht_params = &iwl8000_ht_params, 216 .ht_params = &iwl8000_ht_params,
194 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
195 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
209const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 232const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
210 .name = "Intel(R) Dual Band Wireless-AC 8260", 233 .name = "Intel(R) Dual Band Wireless-AC 8260",
211 .fw_name_pre = IWL8000_FW_PRE, 234 .fw_name_pre = IWL8000_FW_PRE,
212 IWL_DEVICE_8000, 235 IWL_DEVICE_8260,
213 .ht_params = &iwl8000_ht_params, 236 .ht_params = &iwl8000_ht_params,
214 .nvm_ver = IWL8000_NVM_VERSION, 237 .nvm_ver = IWL8000_NVM_VERSION,
215 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 238 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
236}; 259};
237 260
238MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 261MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
262MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb49075683..ab4c2a0470b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); 244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245 245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 246 if (rev_step != 'A')
247 "%s%c-%s.ucode", name_pre, rev_step, tag); 247 snprintf(drv->firmware_name,
248 sizeof(drv->firmware_name), "%s%c-%s.ucode",
249 name_pre, rev_step, tag);
248 } 250 }
249 251
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 252 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642f80dd..ea1e177c2ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1298 return -EBUSY; 1298 return -EBUSY;
1299 } 1299 }
1300 1300
1301 /* we don't support "match all" in the firmware */
1302 if (!req->n_match_sets)
1303 return -EOPNOTSUPP;
1304
1301 ret = iwl_mvm_check_running_scans(mvm, type); 1305 ret = iwl_mvm_check_running_scans(mvm, type);
1302 if (ret) 1306 if (ret)
1303 return ret; 1307 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e2700d..73c95594eabe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
491} 491}
492 492
493static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
494{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
496
497 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
498 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
499 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
500}
501
493static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 502static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
494{ 503{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 504 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8cf4b3..152cf9ad9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1438 inta & ~trans_pcie->inta_mask); 1438 inta & ~trans_pcie->inta_mask);
1439 } 1439 }
1440 1440
1441 /* Re-enable all interrupts */ 1441 /* we are loading the firmware, enable FH_TX interrupt only */
1442 /* only Re-enable if disabled by irq */ 1442 if (handled & CSR_INT_BIT_FH_TX)
1443 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1443 iwl_enable_fw_load_int(trans);
1444 /* only Re-enable all interrupt if disabled by irq */
1445 else if (test_bit(STATUS_INT_ENABLED, &trans->status))
1444 iwl_enable_interrupts(trans); 1446 iwl_enable_interrupts(trans);
1445 /* Re-enable RF_KILL if it occurred */ 1447 /* Re-enable RF_KILL if it occurred */
1446 else if (handled & CSR_INT_BIT_RF_KILL) 1448 else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467a983c..5a854c609477 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1021 &first_ucode_section); 1021 &first_ucode_section);
1022} 1022}
1023 1023
1024static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1025 const struct fw_img *fw, bool run_in_rfkill)
1026{
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill;
1029 int ret;
1030
1031 mutex_lock(&trans_pcie->mutex);
1032
1033 /* Someone called stop_device, don't try to start_fw */
1034 if (trans_pcie->is_down) {
1035 IWL_WARN(trans,
1036 "Can't start_fw since the HW hasn't been started\n");
1037 ret = EIO;
1038 goto out;
1039 }
1040
1041 /* This may fail if AMT took ownership of the device */
1042 if (iwl_pcie_prepare_card_hw(trans)) {
1043 IWL_WARN(trans, "Exit HW not ready\n");
1044 ret = -EIO;
1045 goto out;
1046 }
1047
1048 iwl_enable_rfkill_int(trans);
1049
1050 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = iwl_is_rfkill_set(trans);
1052 if (hw_rfkill)
1053 set_bit(STATUS_RFKILL, &trans->status);
1054 else
1055 clear_bit(STATUS_RFKILL, &trans->status);
1056 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1057 if (hw_rfkill && !run_in_rfkill) {
1058 ret = -ERFKILL;
1059 goto out;
1060 }
1061
1062 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1063
1064 ret = iwl_pcie_nic_init(trans);
1065 if (ret) {
1066 IWL_ERR(trans, "Unable to init nic\n");
1067 goto out;
1068 }
1069
1070 /* make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1073 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1074
1075 /* clear (again), then enable host interrupts */
1076 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1077 iwl_enable_interrupts(trans);
1078
1079 /* really make sure rfkill handshake bits are cleared */
1080 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1081 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1082
1083 /* Load the given image to the HW */
1084 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1085 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1086 else
1087 ret = iwl_pcie_load_given_ucode(trans, fw);
1088
1089out:
1090 mutex_unlock(&trans_pcie->mutex);
1091 return ret;
1092}
1093
1094static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1095{
1096 iwl_pcie_reset_ict(trans);
1097 iwl_pcie_tx_start(trans, scd_addr);
1098}
1099
1100static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1024static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1101{ 1025{
1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1127 * already dead. 1051 * already dead.
1128 */ 1052 */
1129 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1053 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1130 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); 1054 IWL_DEBUG_INFO(trans,
1055 "DEVICE_ENABLED bit was set and is now cleared\n");
1131 iwl_pcie_tx_stop(trans); 1056 iwl_pcie_tx_stop(trans);
1132 iwl_pcie_rx_stop(trans); 1057 iwl_pcie_rx_stop(trans);
1133 1058
@@ -1161,7 +1086,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1161 iwl_disable_interrupts(trans); 1086 iwl_disable_interrupts(trans);
1162 spin_unlock(&trans_pcie->irq_lock); 1087 spin_unlock(&trans_pcie->irq_lock);
1163 1088
1164
1165 /* clear all status bits */ 1089 /* clear all status bits */
1166 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1090 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1167 clear_bit(STATUS_INT_ENABLED, &trans->status); 1091 clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1194 if (hw_rfkill != was_hw_rfkill) 1118 if (hw_rfkill != was_hw_rfkill)
1195 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1119 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1196 1120
1197 /* re-take ownership to prevent other users from stealing the deivce */ 1121 /* re-take ownership to prevent other users from stealing the device */
1198 iwl_pcie_prepare_card_hw(trans); 1122 iwl_pcie_prepare_card_hw(trans);
1199} 1123}
1200 1124
1125static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1126 const struct fw_img *fw, bool run_in_rfkill)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129 bool hw_rfkill;
1130 int ret;
1131
1132 /* This may fail if AMT took ownership of the device */
1133 if (iwl_pcie_prepare_card_hw(trans)) {
1134 IWL_WARN(trans, "Exit HW not ready\n");
1135 ret = -EIO;
1136 goto out;
1137 }
1138
1139 iwl_enable_rfkill_int(trans);
1140
1141 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1142
1143 /*
1144 * We enabled the RF-Kill interrupt and the handler may very
1145 * well be running. Disable the interrupts to make sure no other
1146 * interrupt can be fired.
1147 */
1148 iwl_disable_interrupts(trans);
1149
1150 /* Make sure it finished running */
1151 synchronize_irq(trans_pcie->pci_dev->irq);
1152
1153 mutex_lock(&trans_pcie->mutex);
1154
1155 /* If platform's RF_KILL switch is NOT set to KILL */
1156 hw_rfkill = iwl_is_rfkill_set(trans);
1157 if (hw_rfkill)
1158 set_bit(STATUS_RFKILL, &trans->status);
1159 else
1160 clear_bit(STATUS_RFKILL, &trans->status);
1161 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1162 if (hw_rfkill && !run_in_rfkill) {
1163 ret = -ERFKILL;
1164 goto out;
1165 }
1166
1167 /* Someone called stop_device, don't try to start_fw */
1168 if (trans_pcie->is_down) {
1169 IWL_WARN(trans,
1170 "Can't start_fw since the HW hasn't been started\n");
1171 ret = -EIO;
1172 goto out;
1173 }
1174
1175 /* make sure rfkill handshake bits are cleared */
1176 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1177 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1178 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1179
1180 /* clear (again), then enable host interrupts */
1181 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1182
1183 ret = iwl_pcie_nic_init(trans);
1184 if (ret) {
1185 IWL_ERR(trans, "Unable to init nic\n");
1186 goto out;
1187 }
1188
1189 /*
1190 * Now, we load the firmware and don't want to be interrupted, even
1191 * by the RF-Kill interrupt (hence mask all the interrupt besides the
1192 * FH_TX interrupt which is needed to load the firmware). If the
1193 * RF-Kill switch is toggled, we will find out after having loaded
1194 * the firmware and return the proper value to the caller.
1195 */
1196 iwl_enable_fw_load_int(trans);
1197
1198 /* really make sure rfkill handshake bits are cleared */
1199 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1200 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1201
1202 /* Load the given image to the HW */
1203 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1204 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1205 else
1206 ret = iwl_pcie_load_given_ucode(trans, fw);
1207 iwl_enable_interrupts(trans);
1208
1209 /* re-check RF-Kill state since we may have missed the interrupt */
1210 hw_rfkill = iwl_is_rfkill_set(trans);
1211 if (hw_rfkill)
1212 set_bit(STATUS_RFKILL, &trans->status);
1213 else
1214 clear_bit(STATUS_RFKILL, &trans->status);
1215
1216 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1217 if (hw_rfkill && !run_in_rfkill)
1218 ret = -ERFKILL;
1219
1220out:
1221 mutex_unlock(&trans_pcie->mutex);
1222 return ret;
1223}
1224
1225static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1226{
1227 iwl_pcie_reset_ict(trans);
1228 iwl_pcie_tx_start(trans, scd_addr);
1229}
1230
1201static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1231static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1202{ 1232{
1203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce28238..28f7010e7108 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
138 ((wireless_mode == WIRELESS_MODE_N_5G) || 138 ((wireless_mode == WIRELESS_MODE_N_5G) ||
139 (wireless_mode == WIRELESS_MODE_N_24G))) 139 (wireless_mode == WIRELESS_MODE_N_24G)))
140 rate->flags |= IEEE80211_TX_RC_MCS; 140 rate->flags |= IEEE80211_TX_RC_MCS;
141 if (sta && sta->vht_cap.vht_supported &&
142 (wireless_mode == WIRELESS_MODE_AC_5G ||
143 wireless_mode == WIRELESS_MODE_AC_24G ||
144 wireless_mode == WIRELESS_MODE_AC_ONLY))
145 rate->flags |= IEEE80211_TX_RC_VHT_MCS;
141 } 146 }
142} 147}
143 148
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e727e9..564ca750c5ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@ int wlcore_set_partition(struct wl1271 *wl,
175 if (ret < 0) 175 if (ret < 0)
176 goto out; 176 goto out;
177 177
178 /* We don't need the size of the last partition, as it is
179 * automatically calculated based on the total memory size and
180 * the sizes of the previous partitions.
181 */
178 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 182 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
179 if (ret < 0) 183 if (ret < 0)
180 goto out; 184 goto out;
181 185
182 ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
183 if (ret < 0)
184 goto out;
185
186out: 186out:
187 return ret; 187 return ret;
188} 188}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54f415..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) 36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) 37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) 38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) 39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) 40
41#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
42 42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
382 [ND_CMD_ARS_CAP] = { 382 [ND_CMD_ARS_CAP] = {
383 .in_num = 2, 383 .in_num = 2,
384 .in_sizes = { 8, 8, }, 384 .in_sizes = { 8, 8, },
385 .out_num = 2, 385 .out_num = 4,
386 .out_sizes = { 4, 4, }, 386 .out_sizes = { 4, 4, 4, 4, },
387 }, 387 },
388 [ND_CMD_ARS_START] = { 388 [ND_CMD_ARS_START] = {
389 .in_num = 4, 389 .in_num = 5,
390 .in_sizes = { 8, 8, 2, 6, }, 390 .in_sizes = { 8, 8, 2, 1, 5, },
391 .out_num = 1, 391 .out_num = 2,
392 .out_sizes = { 4, }, 392 .out_sizes = { 4, 4, },
393 }, 393 },
394 [ND_CMD_ARS_STATUS] = { 394 [ND_CMD_ARS_STATUS] = {
395 .out_num = 2, 395 .out_num = 3,
396 .out_sizes = { 4, UINT_MAX, }, 396 .out_sizes = { 4, 4, UINT_MAX, },
397 }, 397 },
398}; 398};
399 399
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
442 return in_field[1]; 442 return in_field[1];
443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) 443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
444 return out_field[1]; 444 return out_field[1];
445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1) 445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
446 return ND_CMD_ARS_STATUS_MAX; 446 return out_field[1] - 8;
447 447
448 return UINT_MAX; 448 return UINT_MAX;
449} 449}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
41 phys_addr_t phys_addr; 41 phys_addr_t phys_addr;
42 /* when non-zero this device is hosting a 'pfn' instance */ 42 /* when non-zero this device is hosting a 'pfn' instance */
43 phys_addr_t data_offset; 43 phys_addr_t data_offset;
44 unsigned long pfn_flags; 44 u64 pfn_flags;
45 void __pmem *virt_addr; 45 void __pmem *virt_addr;
46 size_t size; 46 size_t size;
47 struct badblocks bb; 47 struct badblocks bb;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..b586d84f2518 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI
17 and block devices nodes, as well a a translation for a small 17 and block devices nodes, as well a a translation for a small
18 number of selected SCSI commands to NVMe commands to the NVMe 18 number of selected SCSI commands to NVMe commands to the NVMe
19 driver. If you don't know what this means you probably want 19 driver. If you don't know what this means you probably want
20 to say N here, and if you know what it means you probably 20 to say N here, unless you run a distro that abuses the SCSI
21 want to say N as well. 21 emulation to provide stable device names for mount by id, like
22 some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..03c46412fff4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
55 ns->disk->private_data = NULL; 55 ns->disk->private_data = NULL;
56 spin_unlock(&dev_list_lock); 56 spin_unlock(&dev_list_lock);
57 57
58 nvme_put_ctrl(ns->ctrl);
59 put_disk(ns->disk); 58 put_disk(ns->disk);
59 ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
60 nvme_put_ctrl(ns->ctrl);
60 kfree(ns); 61 kfree(ns);
61} 62}
62 63
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
183 goto out_unmap; 184 goto out_unmap;
184 } 185 }
185 186
186 if (meta_buffer) { 187 if (meta_buffer && meta_len) {
187 struct bio_integrity_payload *bip; 188 struct bio_integrity_payload *bip;
188 189
189 meta = kmalloc(meta_len, GFP_KERNEL); 190 meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
373 374
374 if (copy_from_user(&io, uio, sizeof(io))) 375 if (copy_from_user(&io, uio, sizeof(io)))
375 return -EFAULT; 376 return -EFAULT;
377 if (io.flags)
378 return -EINVAL;
376 379
377 switch (io.opcode) { 380 switch (io.opcode) {
378 case nvme_cmd_write: 381 case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
424 return -EACCES; 427 return -EACCES;
425 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 428 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
426 return -EFAULT; 429 return -EFAULT;
430 if (cmd.flags)
431 return -EINVAL;
427 432
428 memset(&c, 0, sizeof(c)); 433 memset(&c, 0, sizeof(c));
429 c.common.opcode = cmd.opcode; 434 c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
556 u16 old_ms; 561 u16 old_ms;
557 unsigned short bs; 562 unsigned short bs;
558 563
564 if (test_bit(NVME_NS_DEAD, &ns->flags)) {
565 set_capacity(disk, 0);
566 return -ENODEV;
567 }
559 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) { 568 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
560 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n", 569 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
561 __func__, ns->ctrl->instance, ns->ns_id); 570 __func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
831 return ret; 840 return ret;
832} 841}
833 842
843static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
844 struct request_queue *q)
845{
846 if (ctrl->max_hw_sectors) {
847 u32 max_segments =
848 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
849
850 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
851 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
852 }
853 if (ctrl->stripe_size)
854 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
855 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
856 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
857 blk_queue_virt_boundary(q, ctrl->page_size - 1);
858}
859
834/* 860/*
835 * Initialize the cached copies of the Identify data and various controller 861 * Initialize the cached copies of the Identify data and various controller
836 * register in our nvme_ctrl structure. This should be called as soon as 862 * register in our nvme_ctrl structure. This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
888 } 914 }
889 } 915 }
890 916
917 nvme_set_queue_limits(ctrl, ctrl->admin_q);
918
891 kfree(id); 919 kfree(id);
892 return 0; 920 return 0;
893} 921}
@@ -1118,10 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1118 if (!ns) 1146 if (!ns)
1119 return; 1147 return;
1120 1148
1149 ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1150 if (ns->instance < 0)
1151 goto out_free_ns;
1152
1121 ns->queue = blk_mq_init_queue(ctrl->tagset); 1153 ns->queue = blk_mq_init_queue(ctrl->tagset);
1122 if (IS_ERR(ns->queue)) 1154 if (IS_ERR(ns->queue))
1123 goto out_free_ns; 1155 goto out_release_instance;
1124 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1125 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1156 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1126 ns->queue->queuedata = ns; 1157 ns->queue->queuedata = ns;
1127 ns->ctrl = ctrl; 1158 ns->ctrl = ctrl;
@@ -1135,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1135 ns->disk = disk; 1166 ns->disk = disk;
1136 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 1167 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
1137 1168
1169
1138 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1170 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1139 if (ctrl->max_hw_sectors) { 1171 nvme_set_queue_limits(ctrl, ns->queue);
1140 blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
1141 blk_queue_max_segments(ns->queue,
1142 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
1143 }
1144 if (ctrl->stripe_size)
1145 blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
1146 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1147 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1148 blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
1149 1172
1150 disk->major = nvme_major; 1173 disk->major = nvme_major;
1151 disk->first_minor = 0; 1174 disk->first_minor = 0;
@@ -1154,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1154 disk->queue = ns->queue; 1177 disk->queue = ns->queue;
1155 disk->driverfs_dev = ctrl->device; 1178 disk->driverfs_dev = ctrl->device;
1156 disk->flags = GENHD_FL_EXT_DEVT; 1179 disk->flags = GENHD_FL_EXT_DEVT;
1157 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid); 1180 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1158 1181
1159 if (nvme_revalidate_disk(ns->disk)) 1182 if (nvme_revalidate_disk(ns->disk))
1160 goto out_free_disk; 1183 goto out_free_disk;
@@ -1174,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1174 kfree(disk); 1197 kfree(disk);
1175 out_free_queue: 1198 out_free_queue:
1176 blk_cleanup_queue(ns->queue); 1199 blk_cleanup_queue(ns->queue);
1200 out_release_instance:
1201 ida_simple_remove(&ctrl->ns_ida, ns->instance);
1177 out_free_ns: 1202 out_free_ns:
1178 kfree(ns); 1203 kfree(ns);
1179} 1204}
1180 1205
1181static void nvme_ns_remove(struct nvme_ns *ns) 1206static void nvme_ns_remove(struct nvme_ns *ns)
1182{ 1207{
1183 bool kill = nvme_io_incapable(ns->ctrl) && 1208 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1184 !blk_queue_dying(ns->queue); 1209 return;
1185
1186 lockdep_assert_held(&ns->ctrl->namespaces_mutex);
1187
1188 if (kill) {
1189 blk_set_queue_dying(ns->queue);
1190 1210
1191 /*
1192 * The controller was shutdown first if we got here through
1193 * device removal. The shutdown may requeue outstanding
1194 * requests. These need to be aborted immediately so
1195 * del_gendisk doesn't block indefinitely for their completion.
1196 */
1197 blk_mq_abort_requeue_list(ns->queue);
1198 }
1199 if (ns->disk->flags & GENHD_FL_UP) { 1211 if (ns->disk->flags & GENHD_FL_UP) {
1200 if (blk_get_integrity(ns->disk)) 1212 if (blk_get_integrity(ns->disk))
1201 blk_integrity_unregister(ns->disk); 1213 blk_integrity_unregister(ns->disk);
1202 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 1214 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1203 &nvme_ns_attr_group); 1215 &nvme_ns_attr_group);
1204 del_gendisk(ns->disk); 1216 del_gendisk(ns->disk);
1205 }
1206 if (kill || !blk_queue_dying(ns->queue)) {
1207 blk_mq_abort_requeue_list(ns->queue); 1217 blk_mq_abort_requeue_list(ns->queue);
1208 blk_cleanup_queue(ns->queue); 1218 blk_cleanup_queue(ns->queue);
1209 } 1219 }
1220 mutex_lock(&ns->ctrl->namespaces_mutex);
1210 list_del_init(&ns->list); 1221 list_del_init(&ns->list);
1222 mutex_unlock(&ns->ctrl->namespaces_mutex);
1211 nvme_put_ns(ns); 1223 nvme_put_ns(ns);
1212} 1224}
1213 1225
@@ -1301,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1301{ 1313{
1302 struct nvme_ns *ns, *next; 1314 struct nvme_ns *ns, *next;
1303 1315
1304 mutex_lock(&ctrl->namespaces_mutex);
1305 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 1316 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1306 nvme_ns_remove(ns); 1317 nvme_ns_remove(ns);
1307 mutex_unlock(&ctrl->namespaces_mutex);
1308} 1318}
1309 1319
1310static DEFINE_IDA(nvme_instance_ida); 1320static DEFINE_IDA(nvme_instance_ida);
@@ -1351,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
1351 1361
1352 put_device(ctrl->device); 1362 put_device(ctrl->device);
1353 nvme_release_instance(ctrl); 1363 nvme_release_instance(ctrl);
1364 ida_destroy(&ctrl->ns_ida);
1354 1365
1355 ctrl->ops->free_ctrl(ctrl); 1366 ctrl->ops->free_ctrl(ctrl);
1356} 1367}
@@ -1391,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1391 } 1402 }
1392 get_device(ctrl->device); 1403 get_device(ctrl->device);
1393 dev_set_drvdata(ctrl->device, ctrl); 1404 dev_set_drvdata(ctrl->device, ctrl);
1405 ida_init(&ctrl->ns_ida);
1394 1406
1395 spin_lock(&dev_list_lock); 1407 spin_lock(&dev_list_lock);
1396 list_add_tail(&ctrl->node, &nvme_ctrl_list); 1408 list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1403,6 +1415,38 @@ out:
1403 return ret; 1415 return ret;
1404} 1416}
1405 1417
1418/**
1419 * nvme_kill_queues(): Ends all namespace queues
1420 * @ctrl: the dead controller that needs to end
1421 *
1422 * Call this function when the driver determines it is unable to get the
1423 * controller in a state capable of servicing IO.
1424 */
1425void nvme_kill_queues(struct nvme_ctrl *ctrl)
1426{
1427 struct nvme_ns *ns;
1428
1429 mutex_lock(&ctrl->namespaces_mutex);
1430 list_for_each_entry(ns, &ctrl->namespaces, list) {
1431 if (!kref_get_unless_zero(&ns->kref))
1432 continue;
1433
1434 /*
1435 * Revalidating a dead namespace sets capacity to 0. This will
1436 * end buffered writers dirtying pages that can't be synced.
1437 */
1438 if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
1439 revalidate_disk(ns->disk);
1440
1441 blk_set_queue_dying(ns->queue);
1442 blk_mq_abort_requeue_list(ns->queue);
1443 blk_mq_start_stopped_hw_queues(ns->queue, true);
1444
1445 nvme_put_ns(ns);
1446 }
1447 mutex_unlock(&ctrl->namespaces_mutex);
1448}
1449
1406void nvme_stop_queues(struct nvme_ctrl *ctrl) 1450void nvme_stop_queues(struct nvme_ctrl *ctrl)
1407{ 1451{
1408 struct nvme_ns *ns; 1452 struct nvme_ns *ns;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..6bb15e4926dc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149#define NVME_NVM_LP_MLC_PAIRS 886
149struct nvme_nvm_lp_mlc { 150struct nvme_nvm_lp_mlc {
150 __u16 num_pairs; 151 __u16 num_pairs;
151 __u8 pairs[886]; 152 __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
152}; 153};
153 154
154struct nvme_nvm_lp_tbl { 155struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
282 memcpy(dst->lptbl.id, src->lptbl.id, 8); 283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs = 284 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs); 285 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */ 286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1); 293 dst->lptbl.mlc.num_pairs);
288 } 294 }
289 } 295 }
290 296
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..fb15ba5f5d19 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ struct nvme_ctrl {
72 struct mutex namespaces_mutex; 72 struct mutex namespaces_mutex;
73 struct device *device; /* char device */ 73 struct device *device; /* char device */
74 struct list_head node; 74 struct list_head node;
75 struct ida ns_ida;
75 76
76 char name[12]; 77 char name[12];
77 char serial[20]; 78 char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
102 struct request_queue *queue; 103 struct request_queue *queue;
103 struct gendisk *disk; 104 struct gendisk *disk;
104 struct kref kref; 105 struct kref kref;
106 int instance;
105 107
106 u8 eui[8]; 108 u8 eui[8];
107 u8 uuid[16]; 109 u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
112 bool ext; 114 bool ext;
113 u8 pi_type; 115 u8 pi_type;
114 int type; 116 int type;
117 unsigned long flags;
118
119#define NVME_NS_REMOVING 0
120#define NVME_NS_DEAD 1
121
115 u64 mode_select_num_blocks; 122 u64 mode_select_num_blocks;
116 u32 mode_select_block_len; 123 u32 mode_select_block_len;
117}; 124};
@@ -139,9 +146,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
139 u32 val = 0; 146 u32 val = 0;
140 147
141 if (ctrl->ops->io_incapable(ctrl)) 148 if (ctrl->ops->io_incapable(ctrl))
142 return false; 149 return true;
143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) 150 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
144 return false; 151 return true;
145 return val & NVME_CSTS_CFS; 152 return val & NVME_CSTS_CFS;
146} 153}
147 154
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
240 247
241void nvme_stop_queues(struct nvme_ctrl *ctrl); 248void nvme_stop_queues(struct nvme_ctrl *ctrl);
242void nvme_start_queues(struct nvme_ctrl *ctrl); 249void nvme_start_queues(struct nvme_ctrl *ctrl);
250void nvme_kill_queues(struct nvme_ctrl *ctrl);
243 251
244struct request *nvme_alloc_request(struct request_queue *q, 252struct request *nvme_alloc_request(struct request_queue *q,
245 struct nvme_command *cmd, unsigned int flags); 253 struct nvme_command *cmd, unsigned int flags);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..680f5780750c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,7 +86,6 @@ struct nvme_queue;
86 86
87static int nvme_reset(struct nvme_dev *dev); 87static int nvme_reset(struct nvme_dev *dev);
88static void nvme_process_cq(struct nvme_queue *nvmeq); 88static void nvme_process_cq(struct nvme_queue *nvmeq);
89static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
90static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 89static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
91 90
92/* 91/*
@@ -120,6 +119,7 @@ struct nvme_dev {
120 unsigned long flags; 119 unsigned long flags;
121 120
122#define NVME_CTRL_RESETTING 0 121#define NVME_CTRL_RESETTING 0
122#define NVME_CTRL_REMOVING 1
123 123
124 struct nvme_ctrl ctrl; 124 struct nvme_ctrl ctrl;
125 struct completion ioq_wait; 125 struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
286 return 0; 286 return 0;
287} 287}
288 288
289static void nvme_queue_scan(struct nvme_dev *dev)
290{
291 /*
292 * Do not queue new scan work when a controller is reset during
293 * removal.
294 */
295 if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
296 return;
297 queue_work(nvme_workq, &dev->scan_work);
298}
299
289static void nvme_complete_async_event(struct nvme_dev *dev, 300static void nvme_complete_async_event(struct nvme_dev *dev,
290 struct nvme_completion *cqe) 301 struct nvme_completion *cqe)
291{ 302{
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
300 switch (result & 0xff07) { 311 switch (result & 0xff07) {
301 case NVME_AER_NOTICE_NS_CHANGED: 312 case NVME_AER_NOTICE_NS_CHANGED:
302 dev_info(dev->dev, "rescanning\n"); 313 dev_info(dev->dev, "rescanning\n");
303 queue_work(nvme_workq, &dev->scan_work); 314 nvme_queue_scan(dev);
304 default: 315 default:
305 dev_warn(dev->dev, "async event result %08x\n", result); 316 dev_warn(dev->dev, "async event result %08x\n", result);
306 } 317 }
@@ -678,6 +689,14 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
678 blk_mq_start_request(req); 689 blk_mq_start_request(req);
679 690
680 spin_lock_irq(&nvmeq->q_lock); 691 spin_lock_irq(&nvmeq->q_lock);
692 if (unlikely(nvmeq->cq_vector < 0)) {
693 if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
694 ret = BLK_MQ_RQ_QUEUE_BUSY;
695 else
696 ret = BLK_MQ_RQ_QUEUE_ERROR;
697 spin_unlock_irq(&nvmeq->q_lock);
698 goto out;
699 }
681 __nvme_submit_cmd(nvmeq, &cmnd); 700 __nvme_submit_cmd(nvmeq, &cmnd);
682 nvme_process_cq(nvmeq); 701 nvme_process_cq(nvmeq);
683 spin_unlock_irq(&nvmeq->q_lock); 702 spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1018,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
999 if (!blk_mq_request_started(req)) 1018 if (!blk_mq_request_started(req))
1000 return; 1019 return;
1001 1020
1002 dev_warn(nvmeq->q_dmadev, 1021 dev_dbg_ratelimited(nvmeq->q_dmadev,
1003 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); 1022 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
1004 1023
1005 status = NVME_SC_ABORT_REQ; 1024 status = NVME_SC_ABORT_REQ;
@@ -1245,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
1245static void nvme_dev_remove_admin(struct nvme_dev *dev) 1264static void nvme_dev_remove_admin(struct nvme_dev *dev)
1246{ 1265{
1247 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 1266 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1267 /*
1268 * If the controller was reset during removal, it's possible
1269 * user requests may be waiting on a stopped queue. Start the
1270 * queue to flush these to completion.
1271 */
1272 blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
1248 blk_cleanup_queue(dev->ctrl.admin_q); 1273 blk_cleanup_queue(dev->ctrl.admin_q);
1249 blk_mq_free_tag_set(&dev->admin_tagset); 1274 blk_mq_free_tag_set(&dev->admin_tagset);
1250 } 1275 }
@@ -1685,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
1685 return 0; 1710 return 0;
1686 dev->ctrl.tagset = &dev->tagset; 1711 dev->ctrl.tagset = &dev->tagset;
1687 } 1712 }
1688 queue_work(nvme_workq, &dev->scan_work); 1713 nvme_queue_scan(dev);
1689 return 0; 1714 return 0;
1690} 1715}
1691 1716
1692static int nvme_dev_map(struct nvme_dev *dev) 1717static int nvme_pci_enable(struct nvme_dev *dev)
1693{ 1718{
1694 u64 cap; 1719 u64 cap;
1695 int bars, result = -ENOMEM; 1720 int result = -ENOMEM;
1696 struct pci_dev *pdev = to_pci_dev(dev->dev); 1721 struct pci_dev *pdev = to_pci_dev(dev->dev);
1697 1722
1698 if (pci_enable_device_mem(pdev)) 1723 if (pci_enable_device_mem(pdev))
@@ -1700,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
1700 1725
1701 dev->entry[0].vector = pdev->irq; 1726 dev->entry[0].vector = pdev->irq;
1702 pci_set_master(pdev); 1727 pci_set_master(pdev);
1703 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1704 if (!bars)
1705 goto disable_pci;
1706
1707 if (pci_request_selected_regions(pdev, bars, "nvme"))
1708 goto disable_pci;
1709 1728
1710 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 1729 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
1711 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 1730 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
1712 goto disable; 1731 goto disable;
1713 1732
1714 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1715 if (!dev->bar)
1716 goto disable;
1717
1718 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 1733 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
1719 result = -ENODEV; 1734 result = -ENODEV;
1720 goto unmap; 1735 goto disable;
1721 } 1736 }
1722 1737
1723 /* 1738 /*
@@ -1727,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
1727 if (!pdev->irq) { 1742 if (!pdev->irq) {
1728 result = pci_enable_msix(pdev, dev->entry, 1); 1743 result = pci_enable_msix(pdev, dev->entry, 1);
1729 if (result < 0) 1744 if (result < 0)
1730 goto unmap; 1745 goto disable;
1731 } 1746 }
1732 1747
1733 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 1748 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1754,18 +1769,20 @@ static int nvme_dev_map(struct nvme_dev *dev)
1754 pci_save_state(pdev); 1769 pci_save_state(pdev);
1755 return 0; 1770 return 0;
1756 1771
1757 unmap:
1758 iounmap(dev->bar);
1759 dev->bar = NULL;
1760 disable: 1772 disable:
1761 pci_release_regions(pdev);
1762 disable_pci:
1763 pci_disable_device(pdev); 1773 pci_disable_device(pdev);
1764 return result; 1774 return result;
1765} 1775}
1766 1776
1767static void nvme_dev_unmap(struct nvme_dev *dev) 1777static void nvme_dev_unmap(struct nvme_dev *dev)
1768{ 1778{
1779 if (dev->bar)
1780 iounmap(dev->bar);
1781 pci_release_regions(to_pci_dev(dev->dev));
1782}
1783
1784static void nvme_pci_disable(struct nvme_dev *dev)
1785{
1769 struct pci_dev *pdev = to_pci_dev(dev->dev); 1786 struct pci_dev *pdev = to_pci_dev(dev->dev);
1770 1787
1771 if (pdev->msi_enabled) 1788 if (pdev->msi_enabled)
@@ -1773,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
1773 else if (pdev->msix_enabled) 1790 else if (pdev->msix_enabled)
1774 pci_disable_msix(pdev); 1791 pci_disable_msix(pdev);
1775 1792
1776 if (dev->bar) {
1777 iounmap(dev->bar);
1778 dev->bar = NULL;
1779 pci_release_regions(pdev);
1780 }
1781
1782 if (pci_is_enabled(pdev)) { 1793 if (pci_is_enabled(pdev)) {
1783 pci_disable_pcie_error_reporting(pdev); 1794 pci_disable_pcie_error_reporting(pdev);
1784 pci_disable_device(pdev); 1795 pci_disable_device(pdev);
@@ -1837,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1837 nvme_dev_list_remove(dev); 1848 nvme_dev_list_remove(dev);
1838 1849
1839 mutex_lock(&dev->shutdown_lock); 1850 mutex_lock(&dev->shutdown_lock);
1840 if (dev->bar) { 1851 if (pci_is_enabled(to_pci_dev(dev->dev))) {
1841 nvme_stop_queues(&dev->ctrl); 1852 nvme_stop_queues(&dev->ctrl);
1842 csts = readl(dev->bar + NVME_REG_CSTS); 1853 csts = readl(dev->bar + NVME_REG_CSTS);
1843 } 1854 }
@@ -1850,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1850 nvme_disable_io_queues(dev); 1861 nvme_disable_io_queues(dev);
1851 nvme_disable_admin_queue(dev, shutdown); 1862 nvme_disable_admin_queue(dev, shutdown);
1852 } 1863 }
1853 nvme_dev_unmap(dev); 1864 nvme_pci_disable(dev);
1854 1865
1855 for (i = dev->queue_count - 1; i >= 0; i--) 1866 for (i = dev->queue_count - 1; i >= 0; i--)
1856 nvme_clear_queue(dev->queues[i]); 1867 nvme_clear_queue(dev->queues[i]);
@@ -1894,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
1894 kfree(dev); 1905 kfree(dev);
1895} 1906}
1896 1907
1908static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
1909{
1910 dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
1911
1912 kref_get(&dev->ctrl.kref);
1913 nvme_dev_disable(dev, false);
1914 if (!schedule_work(&dev->remove_work))
1915 nvme_put_ctrl(&dev->ctrl);
1916}
1917
1897static void nvme_reset_work(struct work_struct *work) 1918static void nvme_reset_work(struct work_struct *work)
1898{ 1919{
1899 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1920 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
1900 int result; 1921 int result = -ENODEV;
1901 1922
1902 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags))) 1923 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
1903 goto out; 1924 goto out;
@@ -1906,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
1906 * If we're called to reset a live controller first shut it down before 1927 * If we're called to reset a live controller first shut it down before
1907 * moving on. 1928 * moving on.
1908 */ 1929 */
1909 if (dev->bar) 1930 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1910 nvme_dev_disable(dev, false); 1931 nvme_dev_disable(dev, false);
1911 1932
1912 set_bit(NVME_CTRL_RESETTING, &dev->flags); 1933 set_bit(NVME_CTRL_RESETTING, &dev->flags);
1913 1934
1914 result = nvme_dev_map(dev); 1935 result = nvme_pci_enable(dev);
1915 if (result) 1936 if (result)
1916 goto out; 1937 goto out;
1917 1938
1918 result = nvme_configure_admin_queue(dev); 1939 result = nvme_configure_admin_queue(dev);
1919 if (result) 1940 if (result)
1920 goto unmap; 1941 goto out;
1921 1942
1922 nvme_init_queue(dev->queues[0], 0); 1943 nvme_init_queue(dev->queues[0], 0);
1923 result = nvme_alloc_admin_tags(dev); 1944 result = nvme_alloc_admin_tags(dev);
1924 if (result) 1945 if (result)
1925 goto disable; 1946 goto out;
1926 1947
1927 result = nvme_init_identify(&dev->ctrl); 1948 result = nvme_init_identify(&dev->ctrl);
1928 if (result) 1949 if (result)
1929 goto free_tags; 1950 goto out;
1930 1951
1931 result = nvme_setup_io_queues(dev); 1952 result = nvme_setup_io_queues(dev);
1932 if (result) 1953 if (result)
1933 goto free_tags; 1954 goto out;
1934 1955
1935 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS; 1956 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
1936 1957
1937 result = nvme_dev_list_add(dev); 1958 result = nvme_dev_list_add(dev);
1938 if (result) 1959 if (result)
1939 goto remove; 1960 goto out;
1940 1961
1941 /* 1962 /*
1942 * Keep the controller around but remove all namespaces if we don't have 1963 * Keep the controller around but remove all namespaces if we don't have
@@ -1953,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
1953 clear_bit(NVME_CTRL_RESETTING, &dev->flags); 1974 clear_bit(NVME_CTRL_RESETTING, &dev->flags);
1954 return; 1975 return;
1955 1976
1956 remove:
1957 nvme_dev_list_remove(dev);
1958 free_tags:
1959 nvme_dev_remove_admin(dev);
1960 blk_put_queue(dev->ctrl.admin_q);
1961 dev->ctrl.admin_q = NULL;
1962 dev->queues[0]->tags = NULL;
1963 disable:
1964 nvme_disable_admin_queue(dev, false);
1965 unmap:
1966 nvme_dev_unmap(dev);
1967 out: 1977 out:
1968 nvme_remove_dead_ctrl(dev); 1978 nvme_remove_dead_ctrl(dev, result);
1969} 1979}
1970 1980
1971static void nvme_remove_dead_ctrl_work(struct work_struct *work) 1981static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1973,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
1973 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 1983 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
1974 struct pci_dev *pdev = to_pci_dev(dev->dev); 1984 struct pci_dev *pdev = to_pci_dev(dev->dev);
1975 1985
1986 nvme_kill_queues(&dev->ctrl);
1976 if (pci_get_drvdata(pdev)) 1987 if (pci_get_drvdata(pdev))
1977 pci_stop_and_remove_bus_device_locked(pdev); 1988 pci_stop_and_remove_bus_device_locked(pdev);
1978 nvme_put_ctrl(&dev->ctrl); 1989 nvme_put_ctrl(&dev->ctrl);
1979} 1990}
1980 1991
1981static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
1982{
1983 dev_warn(dev->dev, "Removing after probe failure\n");
1984 kref_get(&dev->ctrl.kref);
1985 if (!schedule_work(&dev->remove_work))
1986 nvme_put_ctrl(&dev->ctrl);
1987}
1988
1989static int nvme_reset(struct nvme_dev *dev) 1992static int nvme_reset(struct nvme_dev *dev)
1990{ 1993{
1991 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 1994 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2037,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2037 .free_ctrl = nvme_pci_free_ctrl, 2040 .free_ctrl = nvme_pci_free_ctrl,
2038}; 2041};
2039 2042
2043static int nvme_dev_map(struct nvme_dev *dev)
2044{
2045 int bars;
2046 struct pci_dev *pdev = to_pci_dev(dev->dev);
2047
2048 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2049 if (!bars)
2050 return -ENODEV;
2051 if (pci_request_selected_regions(pdev, bars, "nvme"))
2052 return -ENODEV;
2053
2054 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2055 if (!dev->bar)
2056 goto release;
2057
2058 return 0;
2059 release:
2060 pci_release_regions(pdev);
2061 return -ENODEV;
2062}
2063
2040static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2064static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2041{ 2065{
2042 int node, result = -ENOMEM; 2066 int node, result = -ENOMEM;
@@ -2061,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2061 dev->dev = get_device(&pdev->dev); 2085 dev->dev = get_device(&pdev->dev);
2062 pci_set_drvdata(pdev, dev); 2086 pci_set_drvdata(pdev, dev);
2063 2087
2088 result = nvme_dev_map(dev);
2089 if (result)
2090 goto free;
2091
2064 INIT_LIST_HEAD(&dev->node); 2092 INIT_LIST_HEAD(&dev->node);
2065 INIT_WORK(&dev->scan_work, nvme_dev_scan); 2093 INIT_WORK(&dev->scan_work, nvme_dev_scan);
2066 INIT_WORK(&dev->reset_work, nvme_reset_work); 2094 INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2084,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2084 nvme_release_prp_pools(dev); 2112 nvme_release_prp_pools(dev);
2085 put_pci: 2113 put_pci:
2086 put_device(dev->dev); 2114 put_device(dev->dev);
2115 nvme_dev_unmap(dev);
2087 free: 2116 free:
2088 kfree(dev->queues); 2117 kfree(dev->queues);
2089 kfree(dev->entry); 2118 kfree(dev->entry);
@@ -2107,24 +2136,27 @@ static void nvme_shutdown(struct pci_dev *pdev)
2107 nvme_dev_disable(dev, true); 2136 nvme_dev_disable(dev, true);
2108} 2137}
2109 2138
2139/*
2140 * The driver's remove may be called on a device in a partially initialized
2141 * state. This function must not have any dependencies on the device state in
2142 * order to proceed.
2143 */
2110static void nvme_remove(struct pci_dev *pdev) 2144static void nvme_remove(struct pci_dev *pdev)
2111{ 2145{
2112 struct nvme_dev *dev = pci_get_drvdata(pdev); 2146 struct nvme_dev *dev = pci_get_drvdata(pdev);
2113 2147
2114 spin_lock(&dev_list_lock); 2148 set_bit(NVME_CTRL_REMOVING, &dev->flags);
2115 list_del_init(&dev->node);
2116 spin_unlock(&dev_list_lock);
2117
2118 pci_set_drvdata(pdev, NULL); 2149 pci_set_drvdata(pdev, NULL);
2119 flush_work(&dev->reset_work);
2120 flush_work(&dev->scan_work); 2150 flush_work(&dev->scan_work);
2121 nvme_remove_namespaces(&dev->ctrl); 2151 nvme_remove_namespaces(&dev->ctrl);
2122 nvme_uninit_ctrl(&dev->ctrl); 2152 nvme_uninit_ctrl(&dev->ctrl);
2123 nvme_dev_disable(dev, true); 2153 nvme_dev_disable(dev, true);
2154 flush_work(&dev->reset_work);
2124 nvme_dev_remove_admin(dev); 2155 nvme_dev_remove_admin(dev);
2125 nvme_free_queues(dev, 0); 2156 nvme_free_queues(dev, 0);
2126 nvme_release_cmb(dev); 2157 nvme_release_cmb(dev);
2127 nvme_release_prp_pools(dev); 2158 nvme_release_prp_pools(dev);
2159 nvme_dev_unmap(dev);
2128 nvme_put_ctrl(&dev->ctrl); 2160 nvme_put_ctrl(&dev->ctrl);
2129} 2161}
2130 2162
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae305ae..e7bfc175b8e1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
635 msi_base = be32_to_cpup(msi_map + 2); 635 msi_base = be32_to_cpup(msi_map + 2);
636 rid_len = be32_to_cpup(msi_map + 3); 636 rid_len = be32_to_cpup(msi_map + 3);
637 637
638 if (rid_base & ~map_mask) {
639 dev_err(parent_dev,
640 "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
641 map_mask, rid_base);
642 return rid_out;
643 }
644
638 msi_controller_node = of_find_node_by_phandle(phandle); 645 msi_controller_node = of_find_node_by_phandle(phandle);
639 646
640 matched = (masked_rid >= rid_base && 647 matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
654 if (!matched) 661 if (!matched)
655 return rid_out; 662 return rid_out;
656 663
657 rid_out = masked_rid + msi_base; 664 rid_out = masked_rid - rid_base + msi_base;
658 dev_dbg(dev, 665 dev_dbg(dev,
659 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", 666 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
660 dev_name(parent_dev), map_mask, rid_base, msi_base, 667 dev_name(parent_dev), map_mask, rid_base, msi_base,
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
14config PCI_MVEBU 14config PCI_MVEBU
15 bool "Marvell EBU PCIe controller" 15 bool "Marvell EBU PCIe controller"
16 depends on ARCH_MVEBU || ARCH_DOVE 16 depends on ARCH_MVEBU || ARCH_DOVE
17 depends on ARM
17 depends on OF 18 depends on OF
18 19
19config PCIE_DW 20config PCIE_DW
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
58 58
59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) 59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
60 60
61static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
62{
63 return sys->private_data;
64}
65
66static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, 61static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
67 u32 *bit_pos) 62 u32 *bit_pos)
68{ 63{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
108 struct pcie_port *pp; 103 struct pcie_port *pp;
109 104
110 msi = irq_data_get_msi_desc(d); 105 msi = irq_data_get_msi_desc(d);
111 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 106 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
112 ks_pcie = to_keystone_pcie(pp); 107 ks_pcie = to_keystone_pcie(pp);
113 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 108 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
114 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); 109 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
146 u32 offset; 141 u32 offset;
147 142
148 msi = irq_data_get_msi_desc(d); 143 msi = irq_data_get_msi_desc(d);
149 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 144 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
150 ks_pcie = to_keystone_pcie(pp); 145 ks_pcie = to_keystone_pcie(pp);
151 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 146 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
152 147
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
167 u32 offset; 162 u32 offset;
168 163
169 msi = irq_data_get_msi_desc(d); 164 msi = irq_data_get_msi_desc(d);
170 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 165 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
171 ks_pcie = to_keystone_pcie(pp); 166 ks_pcie = to_keystone_pcie(pp);
172 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 167 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
173 168
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 3923bed93c7e..f39961bcf7aa 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); 77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
78} 78}
79 79
80/* Drop MSG TLP except for Vendor MSG */
81static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
82{
83 u32 val;
84
85 val = ioread32(pcie->dbi + PCIE_STRFMR1);
86 val &= 0xDFFFFFFF;
87 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
88}
89
80static int ls1021_pcie_link_up(struct pcie_port *pp) 90static int ls1021_pcie_link_up(struct pcie_port *pp)
81{ 91{
82 u32 state; 92 u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
97static void ls1021_pcie_host_init(struct pcie_port *pp) 107static void ls1021_pcie_host_init(struct pcie_port *pp)
98{ 108{
99 struct ls_pcie *pcie = to_ls_pcie(pp); 109 struct ls_pcie *pcie = to_ls_pcie(pp);
100 u32 val, index[2]; 110 u32 index[2];
101 111
102 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, 112 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
103 "fsl,pcie-scfg"); 113 "fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
116 126
117 dw_pcie_setup_rc(pp); 127 dw_pcie_setup_rc(pp);
118 128
119 /* 129 ls_pcie_drop_msg_tlp(pcie);
120 * LS1021A Workaround for internal TKT228622
121 * to fix the INTx hang issue
122 */
123 val = ioread32(pcie->dbi + PCIE_STRFMR1);
124 val &= 0xffff;
125 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
126} 130}
127 131
128static int ls_pcie_link_up(struct pcie_port *pp) 132static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
147 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); 151 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
148 ls_pcie_fix_class(pcie); 152 ls_pcie_fix_class(pcie);
149 ls_pcie_clear_multifunction(pcie); 153 ls_pcie_clear_multifunction(pcie);
154 ls_pcie_drop_msg_tlp(pcie);
150 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); 155 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
151} 156}
152 157
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
53}; 53};
54 54
55struct pcifront_sd { 55struct pcifront_sd {
56 int domain; 56 struct pci_sysdata sd;
57 struct pcifront_device *pdev; 57 struct pcifront_device *pdev;
58}; 58};
59 59
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
67 unsigned int domain, unsigned int bus, 67 unsigned int domain, unsigned int bus,
68 struct pcifront_device *pdev) 68 struct pcifront_device *pdev)
69{ 69{
70 sd->domain = domain; 70 /* Because we do not expose that information via XenBus. */
71 sd->sd.node = first_online_node;
72 sd->sd.domain = domain;
71 sd->pdev = pdev; 73 sd->pdev = pdev;
72} 74}
73 75
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
468 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", 470 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
469 domain, bus); 471 domain, bus);
470 472
471 bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); 473 bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
472 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 474 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
473 if (!bus_entry || !sd) { 475 if (!bus_entry || !sd) {
474 err = -ENOMEM; 476 err = -ENOMEM;
475 goto err_out; 477 goto err_out;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4ed225..e96e86d2e745 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg); 347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
348 break; 348 break;
349 case PIN_CONFIG_INPUT_ENABLE: 349 case PIN_CONFIG_INPUT_ENABLE:
350 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
350 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 351 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
351 break; 352 break;
352 case PIN_CONFIG_OUTPUT: 353 case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
354 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false); 355 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
355 break; 356 break;
356 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 357 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
358 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
357 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 359 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
358 break; 360 break;
359 case PIN_CONFIG_DRIVE_STRENGTH: 361 case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d473811bb3..3ef798fac81b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; 666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
667 struct mvebu_pinctrl_group *grp; 667 struct mvebu_pinctrl_group *grp;
668 unsigned num_settings; 668 unsigned num_settings;
669 unsigned supp_settings;
669 670
670 for (num_settings = 0; ; set++) { 671 for (num_settings = 0, supp_settings = 0; ; set++) {
671 if (!set->name) 672 if (!set->name)
672 break; 673 break;
673 674
675 num_settings++;
676
674 /* skip unsupported settings for this variant */ 677 /* skip unsupported settings for this variant */
675 if (pctl->variant && !(pctl->variant & set->variant)) 678 if (pctl->variant && !(pctl->variant & set->variant))
676 continue; 679 continue;
677 680
678 num_settings++; 681 supp_settings++;
679 682
680 /* find gpio/gpo/gpi settings */ 683 /* find gpio/gpo/gpi settings */
681 if (strcmp(set->name, "gpio") == 0) 684 if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
688 } 691 }
689 692
690 /* skip modes with no settings for this variant */ 693 /* skip modes with no settings for this variant */
691 if (!num_settings) 694 if (!supp_settings)
692 continue; 695 continue;
693 696
694 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); 697 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e60106ec2..1f7469c9857d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@ static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); 191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
192} 192}
193 193
194#ifdef CONFIG_DEBUG_FS
194static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset, 195static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
195 enum abx500_gpio_pull_updown *pull_updown) 196 enum abx500_gpio_pull_updown *pull_updown)
196{ 197{
@@ -226,6 +227,7 @@ out:
226 227
227 return ret; 228 return ret;
228} 229}
230#endif
229 231
230static int abx500_set_pull_updown(struct abx500_pinctrl *pct, 232static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
231 int offset, enum abx500_gpio_pull_updown val) 233 int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@ out:
468 return ret; 470 return ret;
469} 471}
470 472
473#ifdef CONFIG_DEBUG_FS
471static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, 474static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
472 unsigned gpio) 475 unsigned gpio)
473{ 476{
@@ -553,8 +556,6 @@ out:
553 return ret; 556 return ret;
554} 557}
555 558
556#ifdef CONFIG_DEBUG_FS
557
558#include <linux/seq_file.h> 559#include <linux/seq_file.h>
559 560
560static void abx500_gpio_dbg_show_one(struct seq_file *s, 561static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205cf809..216f227c6009 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
426 426
427 return 0; 427 return 0;
428} 428}
429EXPORT_SYMBOL(pxa2xx_pinctrl_init);
429 430
430int pxa2xx_pinctrl_exit(struct platform_device *pdev) 431int pxa2xx_pinctrl_exit(struct platform_device *pdev)
431{ 432{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e958589..5cc97f85db02 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@ static const struct pinconf_ops samsung_pinconf_ops = {
514 .pin_config_group_set = samsung_pinconf_group_set, 514 .pin_config_group_set = samsung_pinconf_group_set,
515}; 515};
516 516
517/* gpiolib gpio_set callback function */ 517/*
518static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 518 * The samsung_gpio_set_vlaue() should be called with "bank->slock" held
519 * to avoid race condition.
520 */
521static void samsung_gpio_set_value(struct gpio_chip *gc,
522 unsigned offset, int value)
519{ 523{
520 struct samsung_pin_bank *bank = gpiochip_get_data(gc); 524 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
521 const struct samsung_pin_bank_type *type = bank->type; 525 const struct samsung_pin_bank_type *type = bank->type;
522 unsigned long flags;
523 void __iomem *reg; 526 void __iomem *reg;
524 u32 data; 527 u32 data;
525 528
526 reg = bank->drvdata->virt_base + bank->pctl_offset; 529 reg = bank->drvdata->virt_base + bank->pctl_offset;
527 530
528 spin_lock_irqsave(&bank->slock, flags);
529
530 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); 531 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
531 data &= ~(1 << offset); 532 data &= ~(1 << offset);
532 if (value) 533 if (value)
533 data |= 1 << offset; 534 data |= 1 << offset;
534 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]); 535 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
536}
537
538/* gpiolib gpio_set callback function */
539static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
540{
541 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
542 unsigned long flags;
535 543
544 spin_lock_irqsave(&bank->slock, flags);
545 samsung_gpio_set_value(gc, offset, value);
536 spin_unlock_irqrestore(&bank->slock, flags); 546 spin_unlock_irqrestore(&bank->slock, flags);
537} 547}
538 548
@@ -553,6 +563,8 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
553} 563}
554 564
555/* 565/*
566 * The samsung_gpio_set_direction() should be called with "bank->slock" held
567 * to avoid race condition.
556 * The calls to gpio_direction_output() and gpio_direction_input() 568 * The calls to gpio_direction_output() and gpio_direction_input()
557 * leads to this function call. 569 * leads to this function call.
558 */ 570 */
@@ -564,7 +576,6 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
564 struct samsung_pinctrl_drv_data *drvdata; 576 struct samsung_pinctrl_drv_data *drvdata;
565 void __iomem *reg; 577 void __iomem *reg;
566 u32 data, mask, shift; 578 u32 data, mask, shift;
567 unsigned long flags;
568 579
569 bank = gpiochip_get_data(gc); 580 bank = gpiochip_get_data(gc);
570 type = bank->type; 581 type = bank->type;
@@ -581,31 +592,42 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
581 reg += 4; 592 reg += 4;
582 } 593 }
583 594
584 spin_lock_irqsave(&bank->slock, flags);
585
586 data = readl(reg); 595 data = readl(reg);
587 data &= ~(mask << shift); 596 data &= ~(mask << shift);
588 if (!input) 597 if (!input)
589 data |= FUNC_OUTPUT << shift; 598 data |= FUNC_OUTPUT << shift;
590 writel(data, reg); 599 writel(data, reg);
591 600
592 spin_unlock_irqrestore(&bank->slock, flags);
593
594 return 0; 601 return 0;
595} 602}
596 603
597/* gpiolib gpio_direction_input callback function. */ 604/* gpiolib gpio_direction_input callback function. */
598static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 605static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
599{ 606{
600 return samsung_gpio_set_direction(gc, offset, true); 607 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 unsigned long flags;
609 int ret;
610
611 spin_lock_irqsave(&bank->slock, flags);
612 ret = samsung_gpio_set_direction(gc, offset, true);
613 spin_unlock_irqrestore(&bank->slock, flags);
614 return ret;
601} 615}
602 616
603/* gpiolib gpio_direction_output callback function. */ 617/* gpiolib gpio_direction_output callback function. */
604static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, 618static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
605 int value) 619 int value)
606{ 620{
607 samsung_gpio_set(gc, offset, value); 621 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 return samsung_gpio_set_direction(gc, offset, false); 622 unsigned long flags;
623 int ret;
624
625 spin_lock_irqsave(&bank->slock, flags);
626 samsung_gpio_set_value(gc, offset, value);
627 ret = samsung_gpio_set_direction(gc, offset, false);
628 spin_unlock_irqrestore(&bank->slock, flags);
629
630 return ret;
609} 631}
610 632
611/* 633/*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf047cee..11760bbe9d51 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
492 .pins = sun8i_h3_pins, 492 .pins = sun8i_h3_pins,
493 .npins = ARRAY_SIZE(sun8i_h3_pins), 493 .npins = ARRAY_SIZE(sun8i_h3_pins),
494 .irq_banks = 2, 494 .irq_banks = 2,
495 .irq_read_needs_mux = true
495}; 496};
496 497
497static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) 498static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
21 21
22#include <linux/power/bq27xxx_battery.h> 22#include <linux/power/bq27xxx_battery.h>
23 23
24static DEFINE_IDR(battery_id);
25static DEFINE_MUTEX(battery_mutex);
26
24static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data) 27static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
25{ 28{
26 struct bq27xxx_device_info *di = data; 29 struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
70{ 73{
71 struct bq27xxx_device_info *di; 74 struct bq27xxx_device_info *di;
72 int ret; 75 int ret;
76 char *name;
77 int num;
78
79 /* Get new ID for the new battery device */
80 mutex_lock(&battery_mutex);
81 num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
82 mutex_unlock(&battery_mutex);
83 if (num < 0)
84 return num;
85
86 name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
87 if (!name)
88 goto err_mem;
73 89
74 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL); 90 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
75 if (!di) 91 if (!di)
76 return -ENOMEM; 92 goto err_mem;
77 93
94 di->id = num;
78 di->dev = &client->dev; 95 di->dev = &client->dev;
79 di->chip = id->driver_data; 96 di->chip = id->driver_data;
80 di->name = id->name; 97 di->name = name;
81 di->bus.read = bq27xxx_battery_i2c_read; 98 di->bus.read = bq27xxx_battery_i2c_read;
82 99
83 ret = bq27xxx_battery_setup(di); 100 ret = bq27xxx_battery_setup(di);
84 if (ret) 101 if (ret)
85 return ret; 102 goto err_failed;
86 103
87 /* Schedule a polling after about 1 min */ 104 /* Schedule a polling after about 1 min */
88 schedule_delayed_work(&di->work, 60 * HZ); 105 schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
103 } 120 }
104 121
105 return 0; 122 return 0;
123
124err_mem:
125 ret = -ENOMEM;
126
127err_failed:
128 mutex_lock(&battery_mutex);
129 idr_remove(&battery_id, num);
130 mutex_unlock(&battery_mutex);
131
132 return ret;
106} 133}
107 134
108static int bq27xxx_battery_i2c_remove(struct i2c_client *client) 135static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
111 138
112 bq27xxx_battery_teardown(di); 139 bq27xxx_battery_teardown(di);
113 140
141 mutex_lock(&battery_mutex);
142 idr_remove(&battery_id, di->id);
143 mutex_unlock(&battery_mutex);
144
114 return 0; 145 return 0;
115} 146}
116 147
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605dac8309..c78db05e75b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3035 max = block->base->discipline->max_blocks << block->s2b_shift; 3035 max = block->base->discipline->max_blocks << block->s2b_shift;
3036 } 3036 }
3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3038 block->request_queue->limits.max_dev_sectors = max;
3038 blk_queue_logical_block_size(block->request_queue, 3039 blk_queue_logical_block_size(block->request_queue,
3039 block->bp_block); 3040 block->bp_block);
3040 blk_queue_max_hw_sectors(block->request_queue, max); 3041 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
723 ASCEBC((char *) &cqr->magic, 4); 731 ASCEBC((char *) &cqr->magic, 4);
724 ccw = cqr->cpaddr; 732 ccw = cqr->cpaddr;
725 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 733 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
726 ccw->flags = 0 ; 734 ccw->flags = CCW_FLAG_SLI;
727 ccw->count = 16; 735 ccw->count = 16;
728 ccw->cda = (__u32)(addr_t) cqr->data; 736 ccw->cda = (__u32)(addr_t) cqr->data;
729 ((char *)cqr->data)[0] = reason; 737 ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
930 /* 3. read new alias configuration */ 938 /* 3. read new alias configuration */
931 _schedule_lcu_update(lcu, device); 939 _schedule_lcu_update(lcu, device);
932 lcu->suc_data.device = NULL; 940 lcu->suc_data.device = NULL;
941 dasd_put_device(device);
933 spin_unlock_irqrestore(&lcu->lock, flags); 942 spin_unlock_irqrestore(&lcu->lock, flags);
934} 943}
935 944
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
989 } 998 }
990 lcu->suc_data.reason = reason; 999 lcu->suc_data.reason = reason;
991 lcu->suc_data.device = device; 1000 lcu->suc_data.device = device;
1001 dasd_get_device(device);
992 spin_unlock(&lcu->lock); 1002 spin_unlock(&lcu->lock);
993 schedule_work(&lcu->suc_data.worker); 1003 if (!schedule_work(&lcu->suc_data.worker))
1004 dasd_put_device(device);
994}; 1005};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3b3e0998fa6e..d6a691e27d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4002 struct ipr_sglist *sglist; 4002 struct ipr_sglist *sglist;
4003 char fname[100]; 4003 char fname[100];
4004 char *src; 4004 char *src;
4005 char *endline;
4005 int result, dnld_size; 4006 int result, dnld_size;
4006 4007
4007 if (!capable(CAP_SYS_ADMIN)) 4008 if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4009 4010
4010 snprintf(fname, sizeof(fname), "%s", buf); 4011 snprintf(fname, sizeof(fname), "%s", buf);
4011 4012
4013 endline = strchr(fname, '\n');
4014 if (endline)
4015 *endline = '\0';
4016
4012 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4017 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4013 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4018 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4014 return -EIO; 4019 return -EIO;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index da2e068ee47d..bbfbfd9e5aa3 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -206,6 +206,7 @@ static struct {
206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, 208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
209 {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
209 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 210 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
210 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 211 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
211 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 212 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6b2c4eb7a2..8c6e31874171 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1344 1344
1345 switch (ret) { 1345 switch (ret) {
1346 case BLKPREP_KILL: 1346 case BLKPREP_KILL:
1347 case BLKPREP_INVALID:
1347 req->errors = DID_NO_CONNECT << 16; 1348 req->errors = DID_NO_CONNECT << 16;
1348 /* release the command and kill it */ 1349 /* release the command and kill it */
1349 if (req->special) { 1350 if (req->special) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a851e2c7..00bc7218a7f8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1272void scsi_remove_target(struct device *dev) 1272void scsi_remove_target(struct device *dev)
1273{ 1273{
1274 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1274 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1275 struct scsi_target *starget; 1275 struct scsi_target *starget, *last_target = NULL;
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
1278restart: 1278restart:
1279 spin_lock_irqsave(shost->host_lock, flags); 1279 spin_lock_irqsave(shost->host_lock, flags);
1280 list_for_each_entry(starget, &shost->__targets, siblings) { 1280 list_for_each_entry(starget, &shost->__targets, siblings) {
1281 if (starget->state == STARGET_DEL) 1281 if (starget->state == STARGET_DEL ||
1282 starget == last_target)
1282 continue; 1283 continue;
1283 if (starget->dev.parent == dev || &starget->dev == dev) { 1284 if (starget->dev.parent == dev || &starget->dev == dev) {
1284 kref_get(&starget->reap_ref); 1285 kref_get(&starget->reap_ref);
1286 last_target = starget;
1285 spin_unlock_irqrestore(shost->host_lock, flags); 1287 spin_unlock_irqrestore(shost->host_lock, flags);
1286 __scsi_remove_target(starget); 1288 __scsi_remove_target(starget);
1287 scsi_target_reap(starget); 1289 scsi_target_reap(starget);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { 37 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1571 1571
1572 as->use_cs_gpios = true; 1572 as->use_cs_gpios = true;
1573 if (atmel_spi_is_v2(as) && 1573 if (atmel_spi_is_v2(as) &&
1574 pdev->dev.of_node &&
1574 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { 1575 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1575 as->use_cs_gpios = false; 1576 as->use_cs_gpios = false;
1576 master->num_chipselect = 4; 1577 master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
73 73
74/* Bitfields in CNTL1 */ 74/* Bitfields in CNTL1 */
75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700 75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
76#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080 76#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
77#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040 77#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002 78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001 79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
80 80
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
84/* SPCOM register values */ 84/* SPCOM register values */
85#define SPCOM_CS(x) ((x) << 30) 85#define SPCOM_CS(x) ((x) << 30)
86#define SPCOM_TRANLEN(x) ((x) << 0) 86#define SPCOM_TRANLEN(x) ((x) << 0)
87#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 87#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
88 88
89#define AUTOSUSPEND_TIMEOUT 2000 89#define AUTOSUSPEND_TIMEOUT 2000
90 90
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
233 reinit_completion(&mpc8xxx_spi->done); 233 reinit_completion(&mpc8xxx_spi->done);
234 234
235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ 235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
236 if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 236 if (t->len > SPCOM_TRANLEN_MAX) {
237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
238 " beyond the SPCOM[TRANLEN] field\n", t->len); 238 " beyond the SPCOM[TRANLEN] field\n", t->len);
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..6a4ff27f4357 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
929 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 929 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc_tx) 931 if (!desc_tx)
932 goto no_dma; 932 goto tx_nodma;
933 933
934 desc_tx->callback = spi_imx_dma_tx_callback; 934 desc_tx->callback = spi_imx_dma_tx_callback;
935 desc_tx->callback_param = (void *)spi_imx; 935 desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
941 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 941 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
943 if (!desc_rx) 943 if (!desc_rx)
944 goto no_dma; 944 goto rx_nodma;
945 945
946 desc_rx->callback = spi_imx_dma_rx_callback; 946 desc_rx->callback = spi_imx_dma_rx_callback;
947 desc_rx->callback_param = (void *)spi_imx; 947 desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1008 1008
1009 return ret; 1009 return ret;
1010 1010
1011no_dma: 1011rx_nodma:
1012 dmaengine_terminate_all(master->dma_tx);
1013tx_nodma:
1012 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 1014 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1013 dev_driver_string(&master->dev), 1015 dev_driver_string(&master->dev),
1014 dev_name(&master->dev)); 1016 dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
761 test.iterate_transfer_mask = 1; 761 test.iterate_transfer_mask = 1;
762 762
763 /* count number of transfers with tx/rx_buf != NULL */ 763 /* count number of transfers with tx/rx_buf != NULL */
764 rx_count = tx_count = 0;
764 for (i = 0; i < test.transfer_count; i++) { 765 for (i = 0; i < test.transfer_count; i++) {
765 if (test.transfers[i].tx_buf) 766 if (test.transfers[i].tx_buf)
766 tx_count++; 767 tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1490 return status; 1490 return status;
1491 1491
1492disable_pm: 1492disable_pm:
1493 pm_runtime_dont_use_autosuspend(&pdev->dev);
1494 pm_runtime_put_sync(&pdev->dev);
1493 pm_runtime_disable(&pdev->dev); 1495 pm_runtime_disable(&pdev->dev);
1494free_master: 1496free_master:
1495 spi_master_put(master); 1497 spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1501 struct spi_master *master = platform_get_drvdata(pdev); 1503 struct spi_master *master = platform_get_drvdata(pdev);
1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1504 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1503 1505
1506 pm_runtime_dont_use_autosuspend(mcspi->dev);
1504 pm_runtime_put_sync(mcspi->dev); 1507 pm_runtime_put_sync(mcspi->dev);
1505 pm_runtime_disable(&pdev->dev); 1508 pm_runtime_disable(&pdev->dev);
1506 1509
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 3ec7e65a3ffa..db49af90217e 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -147,7 +147,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
147 mutex_lock(&mdev->graph_mutex); 147 mutex_lock(&mdev->graph_mutex);
148 ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev); 148 ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
149 if (ret) { 149 if (ret) {
150 mutex_unlock(&video->lock); 150 mutex_unlock(&mdev->graph_mutex);
151 return -ENOMEM; 151 return -ENOMEM;
152 } 152 }
153 media_entity_graph_walk_start(&graph, entity); 153 media_entity_graph_walk_start(&graph, entity);
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
85 85
86 /* register a nop PHY */ 86 /* register a nop PHY */
87 ci->phy = usb_phy_generic_register(); 87 ci->phy = usb_phy_generic_register();
88 if (!ci->phy) 88 if (IS_ERR(ci->phy))
89 return -ENOMEM; 89 return PTR_ERR(ci->phy);
90 90
91 memset(res, 0, sizeof(res)); 91 memset(res, 0, sizeof(res));
92 res[0].start = pci_resource_start(pdev, 0); 92 res[0].start = pci_resource_start(pdev, 0);
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
100 if (sscanf(buf, "%u", &mode) != 1) 100 if (sscanf(buf, "%u", &mode) != 1)
101 return -EINVAL; 101 return -EINVAL;
102 102
103 if (mode > 255)
104 return -EBADRQC;
105
103 pm_runtime_get_sync(ci->dev); 106 pm_runtime_get_sync(ci->dev);
104 spin_lock_irqsave(&ci->lock, flags); 107 spin_lock_irqsave(&ci->lock, flags);
105 ret = hw_port_test_set(ci, mode); 108 ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
158int ci_hdrc_otg_init(struct ci_hdrc *ci) 158int ci_hdrc_otg_init(struct ci_hdrc *ci)
159{ 159{
160 INIT_WORK(&ci->work, ci_otg_work); 160 INIT_WORK(&ci->work, ci_otg_work);
161 ci->wq = create_singlethread_workqueue("ci_otg"); 161 ci->wq = create_freezable_workqueue("ci_otg");
162 if (!ci->wq) { 162 if (!ci->wq) {
163 dev_err(ci->dev, "can't create workqueue\n"); 163 dev_err(ci->dev, "can't create workqueue\n");
164 return -ENODEV; 164 return -ENODEV;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9af5d8..51b436918f78 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5401 } 5401 }
5402 5402
5403 bos = udev->bos; 5403 bos = udev->bos;
5404 udev->bos = NULL;
5404 5405
5405 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5406 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5406 5407
@@ -5493,11 +5494,8 @@ done:
5493 usb_set_usb2_hardware_lpm(udev, 1); 5494 usb_set_usb2_hardware_lpm(udev, 1);
5494 usb_unlocked_enable_lpm(udev); 5495 usb_unlocked_enable_lpm(udev);
5495 usb_enable_ltm(udev); 5496 usb_enable_ltm(udev);
5496 /* release the new BOS descriptor allocated by hub_port_init() */ 5497 usb_release_bos_descriptor(udev);
5497 if (udev->bos != bos) { 5498 udev->bos = bos;
5498 usb_release_bos_descriptor(udev);
5499 udev->bos = bos;
5500 }
5501 return 0; 5499 return 0;
5502 5500
5503re_enumerate: 5501re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
1config USB_DWC2 1config USB_DWC2
2 tristate "DesignWare USB2 DRD Core Support" 2 tristate "DesignWare USB2 DRD Core Support"
3 depends on HAS_DMA
3 depends on USB || USB_GADGET 4 depends on USB || USB_GADGET
4 help 5 help
5 Say Y here if your system has a Dual Role Hi-Speed USB 6 Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55914db..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
619 __func__, hsotg->dr_mode); 619 __func__, hsotg->dr_mode);
620 break; 620 break;
621 } 621 }
622
623 /*
624 * NOTE: This is required for some rockchip soc based
625 * platforms.
626 */
627 msleep(50);
622} 628}
623 629
624/* 630/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc, 1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1175 halt_status, n_bytes, 1175 halt_status, n_bytes,
1176 xfer_done); 1176 xfer_done);
1177 if (*xfer_done && urb->status != -EINPROGRESS) 1177 if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1178 failed = 1;
1179
1180 if (failed) {
1181 dwc2_host_complete(hsotg, qtd, urb->status); 1178 dwc2_host_complete(hsotg, qtd, urb->status);
1182 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 1179 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1183 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n", 1180 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1184 failed, *xfer_done, urb->status); 1181 failed, *xfer_done);
1185 return failed; 1182 return failed;
1186 } 1183 }
1187 1184
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1236 1233
1237 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) { 1234 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1238 int i; 1235 int i;
1236 int qtd_desc_count;
1239 1237
1240 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry); 1238 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1241 xfer_done = 0; 1239 xfer_done = 0;
1240 qtd_desc_count = qtd->n_desc;
1242 1241
1243 for (i = 0; i < qtd->n_desc; i++) { 1242 for (i = 0; i < qtd_desc_count; i++) {
1244 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, 1243 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1245 desc_num, halt_status, 1244 desc_num, halt_status,
1246 &xfer_done)) { 1245 &xfer_done))
1247 qtd = NULL; 1246 goto stop_scan;
1248 break; 1247
1249 }
1250 desc_num++; 1248 desc_num++;
1251 } 1249 }
1252 } 1250 }
1253 1251
1252stop_scan:
1254 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) { 1253 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1255 /* 1254 /*
1256 * Resetting the data toggle for bulk and interrupt endpoints 1255 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1258 */ 1257 */
1259 if (halt_status == DWC2_HC_XFER_STALL) 1258 if (halt_status == DWC2_HC_XFER_STALL)
1260 qh->data_toggle = DWC2_HC_PID_DATA0; 1259 qh->data_toggle = DWC2_HC_PID_DATA0;
1261 else if (qtd) 1260 else
1262 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1263 } 1262 }
1264 1263
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; 525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
526 526
527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { 527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
528 if (WARN(!chan || !chan->qh,
529 "chan->qh must be specified for non-control eps\n"))
530 return;
531
528 if (pid == TSIZ_SC_MC_PID_DATA0) 532 if (pid == TSIZ_SC_MC_PID_DATA0)
529 chan->qh->data_toggle = DWC2_HC_PID_DATA0; 533 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
530 else 534 else
531 chan->qh->data_toggle = DWC2_HC_PID_DATA1; 535 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
532 } else { 536 } else {
537 if (WARN(!qtd,
538 "qtd must be specified for control eps\n"))
539 return;
540
533 if (pid == TSIZ_SC_MC_PID_DATA0) 541 if (pid == TSIZ_SC_MC_PID_DATA0)
534 qtd->data_toggle = DWC2_HC_PID_DATA0; 542 qtd->data_toggle = DWC2_HC_PID_DATA0;
535 else 543 else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
856 unsigned pullups_connected:1; 856 unsigned pullups_connected:1;
857 unsigned resize_fifos:1; 857 unsigned resize_fifos:1;
858 unsigned setup_packet_pending:1; 858 unsigned setup_packet_pending:1;
859 unsigned start_config_issued:1;
860 unsigned three_stage_setup:1; 859 unsigned three_stage_setup:1;
861 unsigned usb3_lpm_capable:1; 860 unsigned usb3_lpm_capable:1;
862 861
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
555 int ret; 555 int ret;
556 u32 reg; 556 u32 reg;
557 557
558 dwc->start_config_issued = false;
559 cfg = le16_to_cpu(ctrl->wValue); 558 cfg = le16_to_cpu(ctrl->wValue);
560 559
561 switch (state) { 560 switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
737 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 736 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
738 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 737 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
739 break; 738 break;
740 case USB_REQ_SET_INTERFACE:
741 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
742 dwc->start_config_issued = false;
743 /* Fall through */
744 default: 739 default:
745 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 740 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
746 ret = dwc3_ep0_delegate_req(dwc, ctrl); 741 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82a95ac..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
385 dep->trb_pool_dma = 0; 385 dep->trb_pool_dma = 0;
386} 386}
387 387
388static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
389
390/**
391 * dwc3_gadget_start_config - Configure EP resources
392 * @dwc: pointer to our controller context structure
393 * @dep: endpoint that is being enabled
394 *
395 * The assignment of transfer resources cannot perfectly follow the
396 * data book due to the fact that the controller driver does not have
397 * all knowledge of the configuration in advance. It is given this
398 * information piecemeal by the composite gadget framework after every
399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
400 * programming model in this scenario can cause errors. For two
401 * reasons:
402 *
403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
405 * multiple interfaces.
406 *
407 * 2) The databook does not mention doing more DEPXFERCFG for new
408 * endpoint on alt setting (8.1.6).
409 *
410 * The following simplified method is used instead:
411 *
412 * All hardware endpoints can be assigned a transfer resource and this
413 * setting will stay persistent until either a core reset or
414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
415 * do DEPXFERCFG for every hardware endpoint as well. We are
416 * guaranteed that there are as many transfer resources as endpoints.
417 *
418 * This function is called for each endpoint when it is being enabled
419 * but is triggered only when called for EP0-out, which always happens
420 * first, and which should only happen in one of the above conditions.
421 */
388static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 422static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
389{ 423{
390 struct dwc3_gadget_ep_cmd_params params; 424 struct dwc3_gadget_ep_cmd_params params;
391 u32 cmd; 425 u32 cmd;
426 int i;
427 int ret;
428
429 if (dep->number)
430 return 0;
392 431
393 memset(&params, 0x00, sizeof(params)); 432 memset(&params, 0x00, sizeof(params));
433 cmd = DWC3_DEPCMD_DEPSTARTCFG;
394 434
395 if (dep->number != 1) { 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
396 cmd = DWC3_DEPCMD_DEPSTARTCFG; 436 if (ret)
397 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 437 return ret;
398 if (dep->number > 1) {
399 if (dwc->start_config_issued)
400 return 0;
401 dwc->start_config_issued = true;
402 cmd |= DWC3_DEPCMD_PARAM(2);
403 }
404 438
405 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params); 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
440 struct dwc3_ep *dep = dwc->eps[i];
441
442 if (!dep)
443 continue;
444
445 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
446 if (ret)
447 return ret;
406 } 448 }
407 449
408 return 0; 450 return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
516 struct dwc3_trb *trb_st_hw; 558 struct dwc3_trb *trb_st_hw;
517 struct dwc3_trb *trb_link; 559 struct dwc3_trb *trb_link;
518 560
519 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
520 if (ret)
521 return ret;
522
523 dep->endpoint.desc = desc; 561 dep->endpoint.desc = desc;
524 dep->comp_desc = comp_desc; 562 dep->comp_desc = comp_desc;
525 dep->type = usb_endpoint_type(desc); 563 dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1636 } 1674 }
1637 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1675 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1638 1676
1639 dwc->start_config_issued = false;
1640
1641 /* Start with SuperSpeed Default */ 1677 /* Start with SuperSpeed Default */
1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1678 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1643 1679
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2237 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2273 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2238 2274
2239 dwc3_disconnect_gadget(dwc); 2275 dwc3_disconnect_gadget(dwc);
2240 dwc->start_config_issued = false;
2241 2276
2242 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2277 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2243 dwc->setup_packet_pending = false; 2278 dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2288 2323
2289 dwc3_stop_active_transfers(dwc); 2324 dwc3_stop_active_transfers(dwc);
2290 dwc3_clear_stall_all_ep(dwc); 2325 dwc3_clear_stall_all_ep(dwc);
2291 dwc->start_config_issued = false;
2292 2326
2293 /* Reset device address to zero */ 2327 /* Reset device address to zero */
2294 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2328 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
130 setup_can_stall : 1, 130 setup_can_stall : 1,
131 setup_out_ready : 1, 131 setup_out_ready : 1,
132 setup_out_error : 1, 132 setup_out_error : 1,
133 setup_abort : 1; 133 setup_abort : 1,
134 gadget_registered : 1;
134 unsigned setup_wLength; 135 unsigned setup_wLength;
135 136
136 /* the rest is basically write-once */ 137 /* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
1179 1180
1180 /* closing ep0 === shutdown all */ 1181 /* closing ep0 === shutdown all */
1181 1182
1182 usb_gadget_unregister_driver (&gadgetfs_driver); 1183 if (dev->gadget_registered)
1184 usb_gadget_unregister_driver (&gadgetfs_driver);
1183 1185
1184 /* at this point "good" hardware has disconnected the 1186 /* at this point "good" hardware has disconnected the
1185 * device from USB; the host won't see it any more. 1187 * device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1847 * kick in after the ep0 descriptor is closed. 1849 * kick in after the ep0 descriptor is closed.
1848 */ 1850 */
1849 value = len; 1851 value = len;
1852 dev->gadget_registered = true;
1850 } 1853 }
1851 return value; 1854 return value;
1852 1855
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2340{ 2340{
2341 struct qe_udc *udc; 2341 struct qe_udc *udc;
2342 struct device_node *np = ofdev->dev.of_node; 2342 struct device_node *np = ofdev->dev.of_node;
2343 unsigned int tmp_addr = 0; 2343 unsigned long tmp_addr = 0;
2344 struct usb_device_para __iomem *usbpram; 2344 struct usb_device_para __iomem *usbpram;
2345 unsigned int i; 2345 unsigned int i;
2346 u64 size; 2346 u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, 369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
370 0x50, 0x20, 0x70, 0x40, 0x90 }; 370 0x50, 0x20, 0x70, 0x40, 0x90 };
371 371
372 if (ep->dev->enhanced_mode) 372 if (ep->dev->enhanced_mode) {
373 reg = ep_enhanced[ep->num]; 373 reg = ep_enhanced[ep->num];
374 else{ 374 switch (ep->dev->gadget.speed) {
375 case USB_SPEED_SUPER:
376 reg += 2;
377 break;
378 case USB_SPEED_FULL:
379 reg += 1;
380 break;
381 case USB_SPEED_HIGH:
382 default:
383 break;
384 }
385 } else {
375 reg = (ep->num + 1) * 0x10; 386 reg = (ep->num + 1) * 0x10;
376 if (ep->dev->gadget.speed != USB_SPEED_HIGH) 387 if (ep->dev->gadget.speed != USB_SPEED_HIGH)
377 reg += 1; 388 reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
413 if (!driver->udc_name || strcmp(driver->udc_name, 413 if (!driver->udc_name || strcmp(driver->udc_name,
414 dev_name(&udc->dev)) == 0) { 414 dev_name(&udc->dev)) == 0) {
415 ret = udc_bind_to_driver(udc, driver); 415 ret = udc_bind_to_driver(udc, driver);
416 if (ret != -EPROBE_DEFER)
417 list_del(&driver->pending);
416 if (ret) 418 if (ret)
417 goto err4; 419 goto err4;
418 list_del(&driver->pending);
419 break; 420 break;
420 } 421 }
421 } 422 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
664 } 664 }
665 channel->desired_mode = mode; 665 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 666 musb_writew(epio, MUSB_TXCSR, csr);
667 667
668 return 0; 668 return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
2003 qh->offset, 2003 qh->offset,
2004 urb->transfer_buffer_length); 2004 urb->transfer_buffer_length);
2005 2005
2006 done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, 2006 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
2007 urb, xfer_len, 2007 xfer_len, iso_err))
2008 iso_err);
2009 if (done)
2010 goto finish; 2008 goto finish;
2011 else 2009 else
2012 dev_err(musb->controller, "error: rx_dma failed\n"); 2010 dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
757 otg->host = host; 757 otg->host = host;
758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n"); 758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
759 759
760 /* 760 pm_runtime_get_sync(otg->usb_phy->dev);
761 * Kick the state machine work, if peripheral is not supported 761 schedule_work(&motg->sm_work);
762 * or peripheral is already registered with us.
763 */
764 if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
765 pm_runtime_get_sync(otg->usb_phy->dev);
766 schedule_work(&motg->sm_work);
767 }
768 762
769 return 0; 763 return 0;
770} 764}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
827 dev_dbg(otg->usb_phy->dev, 821 dev_dbg(otg->usb_phy->dev,
828 "peripheral driver registered w/ tranceiver\n"); 822 "peripheral driver registered w/ tranceiver\n");
829 823
830 /* 824 pm_runtime_get_sync(otg->usb_phy->dev);
831 * Kick the state machine work, if host is not supported 825 schedule_work(&motg->sm_work);
832 * or host is already registered with us.
833 */
834 if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
835 pm_runtime_get_sync(otg->usb_phy->dev);
836 schedule_work(&motg->sm_work);
837 }
838 826
839 return 0; 827 return 0;
840} 828}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f612dda9c977..56ecb8b5115d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
475 To compile this driver as a module, choose M here: the 475 To compile this driver as a module, choose M here: the
476 module will be called mos7840. If unsure, choose N. 476 module will be called mos7840. If unsure, choose N.
477 477
478config USB_SERIAL_MXUPORT11
479 tristate "USB Moxa UPORT 11x0 Serial Driver"
480 ---help---
481 Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
482
483 This driver supports:
484
485 - UPort 1110 : 1 port RS-232 USB to Serial Hub.
486 - UPort 1130 : 1 port RS-422/485 USB to Serial Hub.
487 - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
488 - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub.
489 - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
490
491 To compile this driver as a module, choose M here: the
492 module will be called mxu11x0.
493
494config USB_SERIAL_MXUPORT 478config USB_SERIAL_MXUPORT
495 tristate "USB Moxa UPORT Serial Driver" 479 tristate "USB Moxa UPORT Serial Driver"
496 ---help--- 480 ---help---
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f3fa5e53702d..349d9df0895f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o 38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o 39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o 40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
41obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o
42obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o 41obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
43obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o 42obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
44obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o 43obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b8a7f9..73a366de5102 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,9 @@ static const struct usb_device_id id_table[] = {
163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
168 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
166 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 169 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
167 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 170 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
168 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ 171 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644
index 619607323bfd..000000000000
--- a/drivers/usb/serial/mxu11x0.c
+++ /dev/null
@@ -1,1006 +0,0 @@
1/*
2 * USB Moxa UPORT 11x0 Serial Driver
3 *
4 * Copyright (C) 2007 MOXA Technologies Co., Ltd.
5 * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 *
13 * Supports the following Moxa USB to serial converters:
14 * UPort 1110, 1 port RS-232 USB to Serial Hub.
15 * UPort 1130, 1 port RS-422/485 USB to Serial Hub.
16 * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
17 * protection.
18 * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub.
19 * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
20 * protection.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/firmware.h>
26#include <linux/jiffies.h>
27#include <linux/serial.h>
28#include <linux/serial_reg.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/mutex.h>
32#include <linux/tty.h>
33#include <linux/tty_driver.h>
34#include <linux/tty_flip.h>
35#include <linux/uaccess.h>
36#include <linux/usb.h>
37#include <linux/usb/serial.h>
38
39/* Vendor and product ids */
40#define MXU1_VENDOR_ID 0x110a
41#define MXU1_1110_PRODUCT_ID 0x1110
42#define MXU1_1130_PRODUCT_ID 0x1130
43#define MXU1_1150_PRODUCT_ID 0x1150
44#define MXU1_1151_PRODUCT_ID 0x1151
45#define MXU1_1131_PRODUCT_ID 0x1131
46
47/* Commands */
48#define MXU1_GET_VERSION 0x01
49#define MXU1_GET_PORT_STATUS 0x02
50#define MXU1_GET_PORT_DEV_INFO 0x03
51#define MXU1_GET_CONFIG 0x04
52#define MXU1_SET_CONFIG 0x05
53#define MXU1_OPEN_PORT 0x06
54#define MXU1_CLOSE_PORT 0x07
55#define MXU1_START_PORT 0x08
56#define MXU1_STOP_PORT 0x09
57#define MXU1_TEST_PORT 0x0A
58#define MXU1_PURGE_PORT 0x0B
59#define MXU1_RESET_EXT_DEVICE 0x0C
60#define MXU1_GET_OUTQUEUE 0x0D
61#define MXU1_WRITE_DATA 0x80
62#define MXU1_READ_DATA 0x81
63#define MXU1_REQ_TYPE_CLASS 0x82
64
65/* Module identifiers */
66#define MXU1_I2C_PORT 0x01
67#define MXU1_IEEE1284_PORT 0x02
68#define MXU1_UART1_PORT 0x03
69#define MXU1_UART2_PORT 0x04
70#define MXU1_RAM_PORT 0x05
71
72/* Modem status */
73#define MXU1_MSR_DELTA_CTS 0x01
74#define MXU1_MSR_DELTA_DSR 0x02
75#define MXU1_MSR_DELTA_RI 0x04
76#define MXU1_MSR_DELTA_CD 0x08
77#define MXU1_MSR_CTS 0x10
78#define MXU1_MSR_DSR 0x20
79#define MXU1_MSR_RI 0x40
80#define MXU1_MSR_CD 0x80
81#define MXU1_MSR_DELTA_MASK 0x0F
82#define MXU1_MSR_MASK 0xF0
83
84/* Line status */
85#define MXU1_LSR_OVERRUN_ERROR 0x01
86#define MXU1_LSR_PARITY_ERROR 0x02
87#define MXU1_LSR_FRAMING_ERROR 0x04
88#define MXU1_LSR_BREAK 0x08
89#define MXU1_LSR_ERROR 0x0F
90#define MXU1_LSR_RX_FULL 0x10
91#define MXU1_LSR_TX_EMPTY 0x20
92
93/* Modem control */
94#define MXU1_MCR_LOOP 0x04
95#define MXU1_MCR_DTR 0x10
96#define MXU1_MCR_RTS 0x20
97
98/* Mask settings */
99#define MXU1_UART_ENABLE_RTS_IN 0x0001
100#define MXU1_UART_DISABLE_RTS 0x0002
101#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008
102#define MXU1_UART_ENABLE_DSR_OUT 0x0010
103#define MXU1_UART_ENABLE_CTS_OUT 0x0020
104#define MXU1_UART_ENABLE_X_OUT 0x0040
105#define MXU1_UART_ENABLE_XA_OUT 0x0080
106#define MXU1_UART_ENABLE_X_IN 0x0100
107#define MXU1_UART_ENABLE_DTR_IN 0x0800
108#define MXU1_UART_DISABLE_DTR 0x1000
109#define MXU1_UART_ENABLE_MS_INTS 0x2000
110#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000
111#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000
112
113/* Parity */
114#define MXU1_UART_NO_PARITY 0x00
115#define MXU1_UART_ODD_PARITY 0x01
116#define MXU1_UART_EVEN_PARITY 0x02
117#define MXU1_UART_MARK_PARITY 0x03
118#define MXU1_UART_SPACE_PARITY 0x04
119
120/* Stop bits */
121#define MXU1_UART_1_STOP_BITS 0x00
122#define MXU1_UART_1_5_STOP_BITS 0x01
123#define MXU1_UART_2_STOP_BITS 0x02
124
125/* Bits per character */
126#define MXU1_UART_5_DATA_BITS 0x00
127#define MXU1_UART_6_DATA_BITS 0x01
128#define MXU1_UART_7_DATA_BITS 0x02
129#define MXU1_UART_8_DATA_BITS 0x03
130
131/* Operation modes */
132#define MXU1_UART_232 0x00
133#define MXU1_UART_485_RECEIVER_DISABLED 0x01
134#define MXU1_UART_485_RECEIVER_ENABLED 0x02
135
136/* Pipe transfer mode and timeout */
137#define MXU1_PIPE_MODE_CONTINUOUS 0x01
138#define MXU1_PIPE_MODE_MASK 0x03
139#define MXU1_PIPE_TIMEOUT_MASK 0x7C
140#define MXU1_PIPE_TIMEOUT_ENABLE 0x80
141
142/* Config struct */
143struct mxu1_uart_config {
144 __be16 wBaudRate;
145 __be16 wFlags;
146 u8 bDataBits;
147 u8 bParity;
148 u8 bStopBits;
149 char cXon;
150 char cXoff;
151 u8 bUartMode;
152} __packed;
153
154/* Purge modes */
155#define MXU1_PURGE_OUTPUT 0x00
156#define MXU1_PURGE_INPUT 0x80
157
158/* Read/Write data */
159#define MXU1_RW_DATA_ADDR_SFR 0x10
160#define MXU1_RW_DATA_ADDR_IDATA 0x20
161#define MXU1_RW_DATA_ADDR_XDATA 0x30
162#define MXU1_RW_DATA_ADDR_CODE 0x40
163#define MXU1_RW_DATA_ADDR_GPIO 0x50
164#define MXU1_RW_DATA_ADDR_I2C 0x60
165#define MXU1_RW_DATA_ADDR_FLASH 0x70
166#define MXU1_RW_DATA_ADDR_DSP 0x80
167
168#define MXU1_RW_DATA_UNSPECIFIED 0x00
169#define MXU1_RW_DATA_BYTE 0x01
170#define MXU1_RW_DATA_WORD 0x02
171#define MXU1_RW_DATA_DOUBLE_WORD 0x04
172
173struct mxu1_write_data_bytes {
174 u8 bAddrType;
175 u8 bDataType;
176 u8 bDataCounter;
177 __be16 wBaseAddrHi;
178 __be16 wBaseAddrLo;
179 u8 bData[0];
180} __packed;
181
182/* Interrupt codes */
183#define MXU1_CODE_HARDWARE_ERROR 0xFF
184#define MXU1_CODE_DATA_ERROR 0x03
185#define MXU1_CODE_MODEM_STATUS 0x04
186
187static inline int mxu1_get_func_from_code(unsigned char code)
188{
189 return code & 0x0f;
190}
191
192/* Download firmware max packet size */
193#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64
194
195/* Firmware image header */
196struct mxu1_firmware_header {
197 __le16 wLength;
198 u8 bCheckSum;
199} __packed;
200
201#define MXU1_UART_BASE_ADDR 0xFFA0
202#define MXU1_UART_OFFSET_MCR 0x0004
203
204#define MXU1_BAUD_BASE 923077
205
206#define MXU1_TRANSFER_TIMEOUT 2
207#define MXU1_DOWNLOAD_TIMEOUT 1000
208#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
209
210struct mxu1_port {
211 u8 msr;
212 u8 mcr;
213 u8 uart_mode;
214 spinlock_t spinlock; /* Protects msr */
215 struct mutex mutex; /* Protects mcr */
216 bool send_break;
217};
218
219struct mxu1_device {
220 u16 mxd_model;
221};
222
223static const struct usb_device_id mxu1_idtable[] = {
224 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
225 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
226 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
227 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
228 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
229 { }
230};
231
232MODULE_DEVICE_TABLE(usb, mxu1_idtable);
233
234/* Write the given buffer out to the control pipe. */
235static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
236 u8 request,
237 u16 value, u16 index,
238 void *data, size_t size)
239{
240 int status;
241
242 status = usb_control_msg(serial->dev,
243 usb_sndctrlpipe(serial->dev, 0),
244 request,
245 (USB_DIR_OUT | USB_TYPE_VENDOR |
246 USB_RECIP_DEVICE), value, index,
247 data, size,
248 USB_CTRL_SET_TIMEOUT);
249 if (status < 0) {
250 dev_err(&serial->interface->dev,
251 "%s - usb_control_msg failed: %d\n",
252 __func__, status);
253 return status;
254 }
255
256 if (status != size) {
257 dev_err(&serial->interface->dev,
258 "%s - short write (%d / %zd)\n",
259 __func__, status, size);
260 return -EIO;
261 }
262
263 return 0;
264}
265
266/* Send a vendor request without any data */
267static int mxu1_send_ctrl_urb(struct usb_serial *serial,
268 u8 request, u16 value, u16 index)
269{
270 return mxu1_send_ctrl_data_urb(serial, request, value, index,
271 NULL, 0);
272}
273
274static int mxu1_download_firmware(struct usb_serial *serial,
275 const struct firmware *fw_p)
276{
277 int status = 0;
278 int buffer_size;
279 int pos;
280 int len;
281 int done;
282 u8 cs = 0;
283 u8 *buffer;
284 struct usb_device *dev = serial->dev;
285 struct mxu1_firmware_header *header;
286 unsigned int pipe;
287
288 pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
289
290 buffer_size = fw_p->size + sizeof(*header);
291 buffer = kmalloc(buffer_size, GFP_KERNEL);
292 if (!buffer)
293 return -ENOMEM;
294
295 memcpy(buffer, fw_p->data, fw_p->size);
296 memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
297
298 for (pos = sizeof(*header); pos < buffer_size; pos++)
299 cs = (u8)(cs + buffer[pos]);
300
301 header = (struct mxu1_firmware_header *)buffer;
302 header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
303 header->bCheckSum = cs;
304
305 dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
306
307 for (pos = 0; pos < buffer_size; pos += done) {
308 len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
309
310 status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
311 MXU1_DOWNLOAD_TIMEOUT);
312 if (status)
313 break;
314 }
315
316 kfree(buffer);
317
318 if (status) {
319 dev_err(&dev->dev, "failed to download firmware: %d\n", status);
320 return status;
321 }
322
323 msleep_interruptible(100);
324 usb_reset_device(dev);
325
326 dev_dbg(&dev->dev, "%s - download successful\n", __func__);
327
328 return 0;
329}
330
331static int mxu1_port_probe(struct usb_serial_port *port)
332{
333 struct mxu1_port *mxport;
334 struct mxu1_device *mxdev;
335
336 if (!port->interrupt_in_urb) {
337 dev_err(&port->dev, "no interrupt urb\n");
338 return -ENODEV;
339 }
340
341 mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
342 if (!mxport)
343 return -ENOMEM;
344
345 spin_lock_init(&mxport->spinlock);
346 mutex_init(&mxport->mutex);
347
348 mxdev = usb_get_serial_data(port->serial);
349
350 switch (mxdev->mxd_model) {
351 case MXU1_1110_PRODUCT_ID:
352 case MXU1_1150_PRODUCT_ID:
353 case MXU1_1151_PRODUCT_ID:
354 mxport->uart_mode = MXU1_UART_232;
355 break;
356 case MXU1_1130_PRODUCT_ID:
357 case MXU1_1131_PRODUCT_ID:
358 mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
359 break;
360 }
361
362 usb_set_serial_port_data(port, mxport);
363
364 port->port.closing_wait =
365 msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
366 port->port.drain_delay = 1;
367
368 return 0;
369}
370
371static int mxu1_port_remove(struct usb_serial_port *port)
372{
373 struct mxu1_port *mxport;
374
375 mxport = usb_get_serial_port_data(port);
376 kfree(mxport);
377
378 return 0;
379}
380
381static int mxu1_startup(struct usb_serial *serial)
382{
383 struct mxu1_device *mxdev;
384 struct usb_device *dev = serial->dev;
385 struct usb_host_interface *cur_altsetting;
386 char fw_name[32];
387 const struct firmware *fw_p = NULL;
388 int err;
389
390 dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
391 __func__, le16_to_cpu(dev->descriptor.idProduct),
392 dev->descriptor.bNumConfigurations,
393 dev->actconfig->desc.bConfigurationValue);
394
395 /* create device structure */
396 mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
397 if (!mxdev)
398 return -ENOMEM;
399
400 usb_set_serial_data(serial, mxdev);
401
402 mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
403
404 cur_altsetting = serial->interface->cur_altsetting;
405
406 /* if we have only 1 configuration, download firmware */
407 if (cur_altsetting->desc.bNumEndpoints == 1) {
408
409 snprintf(fw_name,
410 sizeof(fw_name),
411 "moxa/moxa-%04x.fw",
412 mxdev->mxd_model);
413
414 err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
415 if (err) {
416 dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
417 err);
418 goto err_free_mxdev;
419 }
420
421 err = mxu1_download_firmware(serial, fw_p);
422 if (err)
423 goto err_release_firmware;
424
425 /* device is being reset */
426 err = -ENODEV;
427 goto err_release_firmware;
428 }
429
430 return 0;
431
432err_release_firmware:
433 release_firmware(fw_p);
434err_free_mxdev:
435 kfree(mxdev);
436
437 return err;
438}
439
/* Free the device-private data allocated in mxu1_startup(). */
static void mxu1_release(struct usb_serial *serial)
{
	kfree(usb_get_serial_data(serial));
}
447
448static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
449 u8 mask, u8 byte)
450{
451 int status;
452 size_t size;
453 struct mxu1_write_data_bytes *data;
454
455 dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
456 __func__, addr, mask, byte);
457
458 size = sizeof(struct mxu1_write_data_bytes) + 2;
459 data = kzalloc(size, GFP_KERNEL);
460 if (!data)
461 return -ENOMEM;
462
463 data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
464 data->bDataType = MXU1_RW_DATA_BYTE;
465 data->bDataCounter = 1;
466 data->wBaseAddrHi = cpu_to_be16(addr >> 16);
467 data->wBaseAddrLo = cpu_to_be16(addr);
468 data->bData[0] = mask;
469 data->bData[1] = byte;
470
471 status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
472 MXU1_RAM_PORT, data, size);
473 if (status < 0)
474 dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
475
476 kfree(data);
477
478 return status;
479}
480
481static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
482{
483 int status;
484
485 status = mxu1_write_byte(port,
486 MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
487 MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
488 mcr);
489 return status;
490}
491
/*
 * Push the current termios settings to the device.
 *
 * Builds an MXU1_SET_CONFIG request from the tty's termios flags (word
 * size, parity, stop bits, flow control, baud divisor) and then updates
 * the modem-control lines for B0 transitions.  Called with
 * old_termios == NULL from open and break_ctl to force a full
 * reconfiguration.
 */
static void mxu1_set_termios(struct tty_struct *tty,
			     struct usb_serial_port *port,
			     struct ktermios *old_termios)
{
	struct mxu1_port *mxport = usb_get_serial_port_data(port);
	struct mxu1_uart_config *config;
	tcflag_t cflag, iflag;
	speed_t baud;
	int status;
	unsigned int mcr;

	cflag = tty->termios.c_cflag;
	iflag = tty->termios.c_iflag;

	/* Skip the USB round trip when nothing relevant changed. */
	if (old_termios &&
	    !tty_termios_hw_change(&tty->termios, old_termios) &&
	    tty->termios.c_iflag == old_termios->c_iflag) {
		dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
		return;
	}

	dev_dbg(&port->dev,
		"%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);

	if (old_termios) {
		dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
			__func__,
			old_termios->c_cflag,
			old_termios->c_iflag);
	}

	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return;

	/* these flags must be set */
	config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
	config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
	/* send_break is latched by mxu1_break() and applied here */
	if (mxport->send_break)
		config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
	config->bUartMode = mxport->uart_mode;

	/* word size; CS8 doubles as the fallback for unsupported sizes */
	switch (C_CSIZE(tty)) {
	case CS5:
		config->bDataBits = MXU1_UART_5_DATA_BITS;
		break;
	case CS6:
		config->bDataBits = MXU1_UART_6_DATA_BITS;
		break;
	case CS7:
		config->bDataBits = MXU1_UART_7_DATA_BITS;
		break;
	default:
	case CS8:
		config->bDataBits = MXU1_UART_8_DATA_BITS;
		break;
	}

	/* parity: CMSPAR selects mark/space, PARODD picks within each pair */
	if (C_PARENB(tty)) {
		config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
		if (C_CMSPAR(tty)) {
			if (C_PARODD(tty))
				config->bParity = MXU1_UART_MARK_PARITY;
			else
				config->bParity = MXU1_UART_SPACE_PARITY;
		} else {
			if (C_PARODD(tty))
				config->bParity = MXU1_UART_ODD_PARITY;
			else
				config->bParity = MXU1_UART_EVEN_PARITY;
		}
	} else {
		config->bParity = MXU1_UART_NO_PARITY;
	}

	if (C_CSTOPB(tty))
		config->bStopBits = MXU1_UART_2_STOP_BITS;
	else
		config->bStopBits = MXU1_UART_1_STOP_BITS;

	if (C_CRTSCTS(tty)) {
		/* RTS flow control must be off to drop RTS for baud rate B0 */
		if (C_BAUD(tty) != B0)
			config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
		config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
	}

	/* software (XON/XOFF) flow control */
	if (I_IXOFF(tty) || I_IXON(tty)) {
		config->cXon = START_CHAR(tty);
		config->cXoff = STOP_CHAR(tty);

		if (I_IXOFF(tty))
			config->wFlags |= MXU1_UART_ENABLE_X_IN;

		if (I_IXON(tty))
			config->wFlags |= MXU1_UART_ENABLE_X_OUT;
	}

	/* device takes a clock divisor, not a raw baud rate */
	baud = tty_get_baud_rate(tty);
	if (!baud)
		baud = 9600;
	config->wBaudRate = MXU1_BAUD_BASE / baud;

	dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
		__func__, baud, config->wBaudRate, config->wFlags,
		config->bDataBits, config->bParity, config->bStopBits,
		config->cXon, config->cXoff, config->bUartMode);

	/* multi-byte fields go out big-endian */
	cpu_to_be16s(&config->wBaudRate);
	cpu_to_be16s(&config->wFlags);

	status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
					 MXU1_UART1_PORT, config,
					 sizeof(*config));
	if (status)
		dev_err(&port->dev, "cannot set config: %d\n", status);

	/* update DTR/RTS for B0 transitions; mcr cache under the mutex */
	mutex_lock(&mxport->mutex);
	mcr = mxport->mcr;

	if (C_BAUD(tty) == B0)
		mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
		mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;

	status = mxu1_set_mcr(port, mcr);
	if (status)
		dev_err(&port->dev, "cannot set modem control: %d\n", status);
	else
		mxport->mcr = mcr;

	mutex_unlock(&mxport->mutex);

	kfree(config);
}
627
628static int mxu1_get_serial_info(struct usb_serial_port *port,
629 struct serial_struct __user *ret_arg)
630{
631 struct serial_struct ret_serial;
632 unsigned cwait;
633
634 if (!ret_arg)
635 return -EFAULT;
636
637 cwait = port->port.closing_wait;
638 if (cwait != ASYNC_CLOSING_WAIT_NONE)
639 cwait = jiffies_to_msecs(cwait) / 10;
640
641 memset(&ret_serial, 0, sizeof(ret_serial));
642
643 ret_serial.type = PORT_16550A;
644 ret_serial.line = port->minor;
645 ret_serial.port = 0;
646 ret_serial.xmit_fifo_size = port->bulk_out_size;
647 ret_serial.baud_base = MXU1_BAUD_BASE;
648 ret_serial.close_delay = 5*HZ;
649 ret_serial.closing_wait = cwait;
650
651 if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
652 return -EFAULT;
653
654 return 0;
655}
656
657
658static int mxu1_set_serial_info(struct usb_serial_port *port,
659 struct serial_struct __user *new_arg)
660{
661 struct serial_struct new_serial;
662 unsigned cwait;
663
664 if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
665 return -EFAULT;
666
667 cwait = new_serial.closing_wait;
668 if (cwait != ASYNC_CLOSING_WAIT_NONE)
669 cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
670
671 port->port.closing_wait = cwait;
672
673 return 0;
674}
675
676static int mxu1_ioctl(struct tty_struct *tty,
677 unsigned int cmd, unsigned long arg)
678{
679 struct usb_serial_port *port = tty->driver_data;
680
681 switch (cmd) {
682 case TIOCGSERIAL:
683 return mxu1_get_serial_info(port,
684 (struct serial_struct __user *)arg);
685 case TIOCSSERIAL:
686 return mxu1_set_serial_info(port,
687 (struct serial_struct __user *)arg);
688 }
689
690 return -ENOIOCTLCMD;
691}
692
693static int mxu1_tiocmget(struct tty_struct *tty)
694{
695 struct usb_serial_port *port = tty->driver_data;
696 struct mxu1_port *mxport = usb_get_serial_port_data(port);
697 unsigned int result;
698 unsigned int msr;
699 unsigned int mcr;
700 unsigned long flags;
701
702 mutex_lock(&mxport->mutex);
703 spin_lock_irqsave(&mxport->spinlock, flags);
704
705 msr = mxport->msr;
706 mcr = mxport->mcr;
707
708 spin_unlock_irqrestore(&mxport->spinlock, flags);
709 mutex_unlock(&mxport->mutex);
710
711 result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) |
712 ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) |
713 ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) |
714 ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) |
715 ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) |
716 ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) |
717 ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0);
718
719 dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
720
721 return result;
722}
723
724static int mxu1_tiocmset(struct tty_struct *tty,
725 unsigned int set, unsigned int clear)
726{
727 struct usb_serial_port *port = tty->driver_data;
728 struct mxu1_port *mxport = usb_get_serial_port_data(port);
729 int err;
730 unsigned int mcr;
731
732 mutex_lock(&mxport->mutex);
733 mcr = mxport->mcr;
734
735 if (set & TIOCM_RTS)
736 mcr |= MXU1_MCR_RTS;
737 if (set & TIOCM_DTR)
738 mcr |= MXU1_MCR_DTR;
739 if (set & TIOCM_LOOP)
740 mcr |= MXU1_MCR_LOOP;
741
742 if (clear & TIOCM_RTS)
743 mcr &= ~MXU1_MCR_RTS;
744 if (clear & TIOCM_DTR)
745 mcr &= ~MXU1_MCR_DTR;
746 if (clear & TIOCM_LOOP)
747 mcr &= ~MXU1_MCR_LOOP;
748
749 err = mxu1_set_mcr(port, mcr);
750 if (!err)
751 mxport->mcr = mcr;
752
753 mutex_unlock(&mxport->mutex);
754
755 return err;
756}
757
758static void mxu1_break(struct tty_struct *tty, int break_state)
759{
760 struct usb_serial_port *port = tty->driver_data;
761 struct mxu1_port *mxport = usb_get_serial_port_data(port);
762
763 if (break_state == -1)
764 mxport->send_break = true;
765 else
766 mxport->send_break = false;
767
768 mxu1_set_termios(tty, port, NULL);
769}
770
/*
 * Open the port: start the interrupt URB, configure the UART and bring
 * the device's port logic up.
 *
 * NOTE(review): the OPEN_PORT/START_PORT pair is deliberately issued
 * twice, before and after the purge/clear-halt steps, with a second
 * set_termios in between.  This looks like a firmware bring-up quirk
 * (cf. the TI 3410/5052 driver this hardware resembles) — confirm
 * against the device documentation before simplifying.
 */
static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct mxu1_port *mxport = usb_get_serial_port_data(port);
	struct usb_serial *serial = port->serial;
	int status;
	u16 open_settings;

	open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
			 MXU1_PIPE_TIMEOUT_ENABLE |
			 (MXU1_TRANSFER_TIMEOUT << 2));

	/* forget any stale modem status from a previous session */
	mxport->msr = 0;

	/* interrupt URB must be live before the device starts reporting */
	status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (status) {
		dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
			status);
		return status;
	}

	if (tty)
		mxu1_set_termios(tty, port, NULL);

	status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
				    open_settings, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send open command: %d\n", status);
		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
				    0, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send start command: %d\n", status);
		goto unlink_int_urb;
	}

	/* flush both directions before real traffic starts */
	status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
				    MXU1_PURGE_INPUT, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot clear input buffers: %d\n",
			status);

		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
				    MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot clear output buffers: %d\n",
			status);

		goto unlink_int_urb;
	}

	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync some times
	 */
	usb_clear_halt(serial->dev, port->write_urb->pipe);
	usb_clear_halt(serial->dev, port->read_urb->pipe);

	/* second configuration pass after the purge/clear-halt */
	if (tty)
		mxu1_set_termios(tty, port, NULL);

	status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
				    open_settings, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send open command: %d\n", status);
		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
				    0, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send start command: %d\n", status);
		goto unlink_int_urb;
	}

	/* hand bulk-in/bulk-out handling to the generic driver */
	status = usb_serial_generic_open(tty, port);
	if (status)
		goto unlink_int_urb;

	return 0;

unlink_int_urb:
	usb_kill_urb(port->interrupt_in_urb);

	return status;
}
861
862static void mxu1_close(struct usb_serial_port *port)
863{
864 int status;
865
866 usb_serial_generic_close(port);
867 usb_kill_urb(port->interrupt_in_urb);
868
869 status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
870 0, MXU1_UART1_PORT);
871 if (status) {
872 dev_err(&port->dev, "failed to send close port command: %d\n",
873 status);
874 }
875}
876
877static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
878{
879 struct mxu1_port *mxport = usb_get_serial_port_data(port);
880 struct async_icount *icount;
881 unsigned long flags;
882
883 dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
884
885 spin_lock_irqsave(&mxport->spinlock, flags);
886 mxport->msr = msr & MXU1_MSR_MASK;
887 spin_unlock_irqrestore(&mxport->spinlock, flags);
888
889 if (msr & MXU1_MSR_DELTA_MASK) {
890 icount = &port->icount;
891 if (msr & MXU1_MSR_DELTA_CTS)
892 icount->cts++;
893 if (msr & MXU1_MSR_DELTA_DSR)
894 icount->dsr++;
895 if (msr & MXU1_MSR_DELTA_CD)
896 icount->dcd++;
897 if (msr & MXU1_MSR_DELTA_RI)
898 icount->rng++;
899
900 wake_up_interruptible(&port->port.delta_msr_wait);
901 }
902}
903
/*
 * Completion handler for the interrupt-in URB.
 *
 * Each event is a two-byte packet: data[0] carries the event code,
 * data[1] its payload.  Modem-status updates are forwarded to
 * mxu1_handle_new_msr(); data errors and unknown codes are only logged.
 * The URB is resubmitted on every path except permanent unlink errors.
 */
static void mxu1_interrupt_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	int length = urb->actual_length;
	int function;
	int status;
	u8 msr;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was unlinked or device is gone: do not resubmit */
		dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
			__func__, urb->status);
		return;
	default:
		/* transient error: log it and still resubmit */
		dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
			__func__, urb->status);
		goto exit;
	}

	if (length != 2) {
		dev_dbg(&port->dev, "%s - bad packet size: %d\n",
			__func__, length);
		goto exit;
	}

	if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
		dev_err(&port->dev, "hardware error: %d\n", data[1]);
		goto exit;
	}

	function = mxu1_get_func_from_code(data[0]);

	dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
		__func__, function, data[1]);

	switch (function) {
	case MXU1_CODE_DATA_ERROR:
		dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
			__func__, data[1]);
		break;

	case MXU1_CODE_MODEM_STATUS:
		msr = data[1];
		mxu1_handle_new_msr(port, msr);
		break;

	default:
		dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
			data[1]);
		break;
	}

exit:
	/* keep the event stream alive for the lifetime of the open port */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
			status);
	}
}
968
/*
 * usb_serial_driver definition for the single-port MOXA UPort 11x0
 * family.  Bulk data uses the generic usb-serial read/write paths;
 * modem-status and error events arrive via the interrupt endpoint
 * (mxu1_interrupt_callback).
 */
static struct usb_serial_driver mxu11x0_device = {
	.driver = {
		.owner		= THIS_MODULE,
		.name		= "mxu11x0",
	},
	.description		= "MOXA UPort 11x0",
	.id_table		= mxu1_idtable,
	.num_ports		= 1,
	.port_probe		= mxu1_port_probe,
	.port_remove		= mxu1_port_remove,
	.attach			= mxu1_startup,
	.release		= mxu1_release,
	.open			= mxu1_open,
	.close			= mxu1_close,
	.ioctl			= mxu1_ioctl,
	.set_termios		= mxu1_set_termios,
	.tiocmget		= mxu1_tiocmget,
	.tiocmset		= mxu1_tiocmset,
	.tiocmiwait		= usb_serial_generic_tiocmiwait,
	.get_icount		= usb_serial_generic_get_icount,
	.break_ctl		= mxu1_break,
	.read_int_callback	= mxu1_interrupt_callback,
};
992
/* NULL-terminated driver list handed to module_usb_serial_driver(). */
static struct usb_serial_driver *const serial_drivers[] = {
	&mxu11x0_device, NULL
};
996
/* Standard usb-serial module registration boilerplate. */
module_usb_serial_driver(serial_drivers, mxu1_idtable);

MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
MODULE_LICENSE("GPL");
/* One firmware image per supported model; loaded in mxu1_startup() */
MODULE_FIRMWARE("moxa/moxa-1110.fw");
MODULE_FIRMWARE("moxa/moxa-1130.fw");
MODULE_FIRMWARE("moxa/moxa-1131.fw");
MODULE_FIRMWARE("moxa/moxa-1150.fw");
MODULE_FIRMWARE("moxa/moxa-1151.fw");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e512e0fc..348e19834b83 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
270#define TELIT_PRODUCT_UE910_V2 0x1012 270#define TELIT_PRODUCT_UE910_V2 0x1012
271#define TELIT_PRODUCT_LE922_USBCFG0 0x1042 271#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
272#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 272#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
273#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
273#define TELIT_PRODUCT_LE920 0x1200 274#define TELIT_PRODUCT_LE920 0x1200
274#define TELIT_PRODUCT_LE910 0x1201 275#define TELIT_PRODUCT_LE910 0x1201
275 276
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
315#define TOSHIBA_PRODUCT_G450 0x0d45 316#define TOSHIBA_PRODUCT_G450 0x0d45
316 317
317#define ALINK_VENDOR_ID 0x1e0e 318#define ALINK_VENDOR_ID 0x1e0e
319#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
318#define ALINK_PRODUCT_PH300 0x9100 320#define ALINK_PRODUCT_PH300 0x9100
319#define ALINK_PRODUCT_3GU 0x9200 321#define ALINK_PRODUCT_3GU 0x9200
320 322
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
607 .reserved = BIT(3) | BIT(4), 609 .reserved = BIT(3) | BIT(4),
608}; 610};
609 611
612static const struct option_blacklist_info simcom_sim7100e_blacklist = {
613 .reserved = BIT(5) | BIT(6),
614};
615
610static const struct option_blacklist_info telit_le910_blacklist = { 616static const struct option_blacklist_info telit_le910_blacklist = {
611 .sendsetup = BIT(0), 617 .sendsetup = BIT(0),
612 .reserved = BIT(1) | BIT(2), 618 .reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
1122 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1128 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1123 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1129 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1124 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1130 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1131 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1132 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1125 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1133 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1126 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1134 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1127 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1135 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1136 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
1137 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1128 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1138 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1129 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1139 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1130 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1140 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
1176 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1186 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1177 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), 1187 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1178 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1188 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1189 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1190 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1179 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1191 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1180 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1192 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1181 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1193 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
1645 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 1657 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1646 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1658 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1647 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1659 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1660 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1661 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1648 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1662 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1649 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1663 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1650 }, 1664 },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ 157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ 158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ 159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */ 160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */ 161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
162 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
163 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
162 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 164 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
163 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 165 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
164 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 166 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
165 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
168 171
169 /* Huawei devices */ 172 /* Huawei devices */
170 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 173 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2760a7ba3f30..8c80a48e3233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
446 info.num_regions = VFIO_PCI_NUM_REGIONS; 446 info.num_regions = VFIO_PCI_NUM_REGIONS;
447 info.num_irqs = VFIO_PCI_NUM_IRQS; 447 info.num_irqs = VFIO_PCI_NUM_IRQS;
448 448
449 return copy_to_user((void __user *)arg, &info, minsz); 449 return copy_to_user((void __user *)arg, &info, minsz) ?
450 -EFAULT : 0;
450 451
451 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 452 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
452 struct pci_dev *pdev = vdev->pdev; 453 struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
520 return -EINVAL; 521 return -EINVAL;
521 } 522 }
522 523
523 return copy_to_user((void __user *)arg, &info, minsz); 524 return copy_to_user((void __user *)arg, &info, minsz) ?
525 -EFAULT : 0;
524 526
525 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 527 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
526 struct vfio_irq_info info; 528 struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
555 else 557 else
556 info.flags |= VFIO_IRQ_INFO_NORESIZE; 558 info.flags |= VFIO_IRQ_INFO_NORESIZE;
557 559
558 return copy_to_user((void __user *)arg, &info, minsz); 560 return copy_to_user((void __user *)arg, &info, minsz) ?
561 -EFAULT : 0;
559 562
560 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 563 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
561 struct vfio_irq_set hdr; 564 struct vfio_irq_set hdr;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
219 info.num_regions = vdev->num_regions; 219 info.num_regions = vdev->num_regions;
220 info.num_irqs = vdev->num_irqs; 220 info.num_irqs = vdev->num_irqs;
221 221
222 return copy_to_user((void __user *)arg, &info, minsz); 222 return copy_to_user((void __user *)arg, &info, minsz) ?
223 -EFAULT : 0;
223 224
224 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 225 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
225 struct vfio_region_info info; 226 struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
240 info.size = vdev->regions[info.index].size; 241 info.size = vdev->regions[info.index].size;
241 info.flags = vdev->regions[info.index].flags; 242 info.flags = vdev->regions[info.index].flags;
242 243
243 return copy_to_user((void __user *)arg, &info, minsz); 244 return copy_to_user((void __user *)arg, &info, minsz) ?
245 -EFAULT : 0;
244 246
245 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 247 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
246 struct vfio_irq_info info; 248 struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
259 info.flags = vdev->irqs[info.index].flags; 261 info.flags = vdev->irqs[info.index].flags;
260 info.count = vdev->irqs[info.index].count; 262 info.count = vdev->irqs[info.index].count;
261 263
262 return copy_to_user((void __user *)arg, &info, minsz); 264 return copy_to_user((void __user *)arg, &info, minsz) ?
265 -EFAULT : 0;
263 266
264 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 267 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
265 struct vfio_irq_set hdr; 268 struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f1ea3dddbad..75b24e93cedb 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
999 999
1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu); 1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1001 1001
1002 return copy_to_user((void __user *)arg, &info, minsz); 1002 return copy_to_user((void __user *)arg, &info, minsz) ?
1003 -EFAULT : 0;
1003 1004
1004 } else if (cmd == VFIO_IOMMU_MAP_DMA) { 1005 } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1005 struct vfio_iommu_type1_dma_map map; 1006 struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1032 if (ret) 1033 if (ret)
1033 return ret; 1034 return ret;
1034 1035
1035 return copy_to_user((void __user *)arg, &unmap, minsz); 1036 return copy_to_user((void __user *)arg, &unmap, minsz) ?
1037 -EFAULT : 0;
1036 } 1038 }
1037 1039
1038 return -ENOTTY; 1040 return -ENOTTY;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..236553e81027 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1156{ 1156{
1157 __virtio16 last_used_idx; 1157 __virtio16 last_used_idx;
1158 int r; 1158 int r;
1159 bool is_le = vq->is_le;
1160
1159 if (!vq->private_data) { 1161 if (!vq->private_data) {
1160 vq->is_le = virtio_legacy_is_little_endian(); 1162 vq->is_le = virtio_legacy_is_little_endian();
1161 return 0; 1163 return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1165 1167
1166 r = vhost_update_used_flags(vq); 1168 r = vhost_update_used_flags(vq);
1167 if (r) 1169 if (r)
1168 return r; 1170 goto err;
1169 vq->signalled_used_valid = false; 1171 vq->signalled_used_valid = false;
1170 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) 1172 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1171 return -EFAULT; 1173 r = -EFAULT;
1174 goto err;
1175 }
1172 r = __get_user(last_used_idx, &vq->used->idx); 1176 r = __get_user(last_used_idx, &vq->used->idx);
1173 if (r) 1177 if (r)
1174 return r; 1178 goto err;
1175 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); 1179 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1176 return 0; 1180 return 0;
1181err:
1182 vq->is_le = is_le;
1183 return r;
1177} 1184}
1178EXPORT_SYMBOL_GPL(vhost_init_used); 1185EXPORT_SYMBOL_GPL(vhost_init_used);
1179 1186
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
709 } 709 }
710 710
711 if (!err) { 711 if (!err) {
712 ops->cur_blink_jiffies = HZ / 5;
712 info->fbcon_par = ops; 713 info->fbcon_par = ops;
713 714
714 if (vc) 715 if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
956 ops->currcon = -1; 957 ops->currcon = -1;
957 ops->graphics = 1; 958 ops->graphics = 1;
958 ops->cur_rotate = -1; 959 ops->cur_rotate = -1;
960 ops->cur_blink_jiffies = HZ / 5;
959 info->fbcon_par = ops; 961 info->fbcon_par = ops;
960 p->con_rotate = initial_rotation; 962 p->con_rotate = initial_rotation;
961 set_blitting_type(vc, info); 963 set_blitting_type(vc, info);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c0c11fad4611..7760fc1a2218 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
679 679
680 pci_read_config_dword(pci_dev, 680 pci_read_config_dword(pci_dev,
681 notify + offsetof(struct virtio_pci_notify_cap, 681 notify + offsetof(struct virtio_pci_notify_cap,
682 cap.length), 682 cap.offset),
683 &notify_offset); 683 &notify_offset);
684 684
685 /* We don't know how many VQs we'll map, ahead of the time. 685 /* We don't know how many VQs we'll map, ahead of the time.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0f6d8515ba4f..80825a7e8e48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1569,6 +1569,17 @@ config WATCHDOG_RIO
1569 machines. The watchdog timeout period is normally one minute but 1569 machines. The watchdog timeout period is normally one minute but
1570 can be changed with a boot-time parameter. 1570 can be changed with a boot-time parameter.
1571 1571
1572config WATCHDOG_SUN4V
1573 tristate "Sun4v Watchdog support"
1574 select WATCHDOG_CORE
1575 depends on SPARC64
1576 help
1577 Say Y here to support the hypervisor watchdog capability embedded
1578 in the SPARC sun4v architecture.
1579
1580 To compile this driver as a module, choose M here. The module will
1581 be called sun4v_wdt.
1582
1572# XTENSA Architecture 1583# XTENSA Architecture
1573 1584
1574# Xen Architecture 1585# Xen Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753256ab..f6a6a387c6c7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
179 179
180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o 180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o
181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o 181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
182obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
182 183
183# XTENSA Architecture 184# XTENSA Architecture
184 185
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644
index 000000000000..1467fe50a76f
--- /dev/null
+++ b/drivers/watchdog/sun4v_wdt.c
@@ -0,0 +1,191 @@
1/*
2 * sun4v watchdog timer
3 * (c) Copyright 2016 Oracle Corporation
4 *
5 * Implement a simple watchdog driver using the built-in sun4v hypervisor
6 * watchdog support. If time expires, the hypervisor stops or bounces
7 * the guest domain.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/watchdog.h>
23#include <asm/hypervisor.h>
24#include <asm/mdesc.h>
25
26#define WDT_TIMEOUT 60
27#define WDT_MAX_TIMEOUT 31536000
28#define WDT_MIN_TIMEOUT 1
29#define WDT_DEFAULT_RESOLUTION_MS 1000 /* 1 second */
30
31static unsigned int timeout;
32module_param(timeout, uint, 0);
33MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
34 __MODULE_STRING(WDT_TIMEOUT) ")");
35
36static bool nowayout = WATCHDOG_NOWAYOUT;
37module_param(nowayout, bool, S_IRUGO);
38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
39 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
40
41static int sun4v_wdt_stop(struct watchdog_device *wdd)
42{
43 sun4v_mach_set_watchdog(0, NULL);
44
45 return 0;
46}
47
48static int sun4v_wdt_ping(struct watchdog_device *wdd)
49{
50 int hverr;
51
52 /*
53 * HV watchdog timer will round up the timeout
54 * passed in to the nearest multiple of the
55 * watchdog resolution in milliseconds.
56 */
57 hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
58 if (hverr == HV_EINVAL)
59 return -EINVAL;
60
61 return 0;
62}
63
64static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
65 unsigned int timeout)
66{
67 wdd->timeout = timeout;
68
69 return 0;
70}
71
72static const struct watchdog_info sun4v_wdt_ident = {
73 .options = WDIOF_SETTIMEOUT |
74 WDIOF_MAGICCLOSE |
75 WDIOF_KEEPALIVEPING,
76 .identity = "sun4v hypervisor watchdog",
77 .firmware_version = 0,
78};
79
80static struct watchdog_ops sun4v_wdt_ops = {
81 .owner = THIS_MODULE,
82 .start = sun4v_wdt_ping,
83 .stop = sun4v_wdt_stop,
84 .ping = sun4v_wdt_ping,
85 .set_timeout = sun4v_wdt_set_timeout,
86};
87
88static struct watchdog_device wdd = {
89 .info = &sun4v_wdt_ident,
90 .ops = &sun4v_wdt_ops,
91 .min_timeout = WDT_MIN_TIMEOUT,
92 .max_timeout = WDT_MAX_TIMEOUT,
93 .timeout = WDT_TIMEOUT,
94};
95
96static int __init sun4v_wdt_init(void)
97{
98 struct mdesc_handle *handle;
99 u64 node;
100 const u64 *value;
101 int err = 0;
102 unsigned long major = 1, minor = 1;
103
104 /*
105 * There are 2 properties that can be set from the control
106 * domain for the watchdog.
107 * watchdog-resolution
108 * watchdog-max-timeout
109 *
110 * We can expect a handle to be returned otherwise something
111 * serious is wrong. Correct to return -ENODEV here.
112 */
113
114 handle = mdesc_grab();
115 if (!handle)
116 return -ENODEV;
117
118 node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
119 err = -ENODEV;
120 if (node == MDESC_NODE_NULL)
121 goto out_release;
122
123 /*
124 * This is a safe way to validate if we are on the right
125 * platform.
126 */
127 if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
128 goto out_hv_unreg;
129
130 /* Allow value of watchdog-resolution up to 1s (default) */
131 value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
132 err = -EINVAL;
133 if (value) {
134 if (*value == 0 ||
135 *value > WDT_DEFAULT_RESOLUTION_MS)
136 goto out_hv_unreg;
137 }
138
139 value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
140 if (value) {
141 /*
142 * If the property value (in ms) is smaller than
143 * min_timeout, return -EINVAL.
144 */
145 if (*value < wdd.min_timeout * 1000)
146 goto out_hv_unreg;
147
148 /*
149 * If the property value is smaller than
150 * default max_timeout then set watchdog max_timeout to
151 * the value of the property in seconds.
152 */
153 if (*value < wdd.max_timeout * 1000)
154 wdd.max_timeout = *value / 1000;
155 }
156
157 watchdog_init_timeout(&wdd, timeout, NULL);
158
159 watchdog_set_nowayout(&wdd, nowayout);
160
161 err = watchdog_register_device(&wdd);
162 if (err)
163 goto out_hv_unreg;
164
165 pr_info("initialized (timeout=%ds, nowayout=%d)\n",
166 wdd.timeout, nowayout);
167
168 mdesc_release(handle);
169
170 return 0;
171
172out_hv_unreg:
173 sun4v_hvapi_unregister(HV_GRP_CORE);
174
175out_release:
176 mdesc_release(handle);
177 return err;
178}
179
180static void __exit sun4v_wdt_exit(void)
181{
182 sun4v_hvapi_unregister(HV_GRP_CORE);
183 watchdog_unregister_device(&wdd);
184}
185
186module_init(sun4v_wdt_init);
187module_exit(sun4v_wdt_exit);
188
189MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
190MODULE_DESCRIPTION("sun4v watchdog driver");
191MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
227 /* 227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside. 229 * to access the BARs where the MSI-X entries reside.
230 * But VF devices are unique in which the PF needs to be checked.
230 */ 231 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd); 232 pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 233 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO; 234 return -ENXIO;
234 235
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
332 struct xen_pcibk_dev_data *dev_data = NULL; 333 struct xen_pcibk_dev_data *dev_data = NULL;
333 struct xen_pci_op *op = &pdev->op; 334 struct xen_pci_op *op = &pdev->op;
334 int test_intx = 0; 335 int test_intx = 0;
336#ifdef CONFIG_PCI_MSI
337 unsigned int nr = 0;
338#endif
335 339
336 *op = pdev->sh_info->op; 340 *op = pdev->sh_info->op;
337 barrier(); 341 barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
360 op->err = xen_pcibk_disable_msi(pdev, dev, op); 364 op->err = xen_pcibk_disable_msi(pdev, dev, op);
361 break; 365 break;
362 case XEN_PCI_OP_enable_msix: 366 case XEN_PCI_OP_enable_msix:
367 nr = op->value;
363 op->err = xen_pcibk_enable_msix(pdev, dev, op); 368 op->err = xen_pcibk_enable_msix(pdev, dev, op);
364 break; 369 break;
365 case XEN_PCI_OP_disable_msix: 370 case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 387 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i; 388 unsigned int i;
384 389
385 for (i = 0; i < op->value; i++) 390 for (i = 0; i < nr; i++)
386 pdev->sh_info->op.msix_entries[i].vector = 391 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector; 392 op->msix_entries[i].vector;
388 } 393 }
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
849} 849}
850 850
851/* 851/*
852 Check for a translation entry being present
853*/
854static struct v2p_entry *scsiback_chk_translation_entry(
855 struct vscsibk_info *info, struct ids_tuple *v)
856{
857 struct list_head *head = &(info->v2p_entry_lists);
858 struct v2p_entry *entry;
859
860 list_for_each_entry(entry, head, l)
861 if ((entry->v.chn == v->chn) &&
862 (entry->v.tgt == v->tgt) &&
863 (entry->v.lun == v->lun))
864 return entry;
865
866 return NULL;
867}
868
869/*
852 Add a new translation entry 870 Add a new translation entry
853*/ 871*/
854static int scsiback_add_translation_entry(struct vscsibk_info *info, 872static int scsiback_add_translation_entry(struct vscsibk_info *info,
855 char *phy, struct ids_tuple *v) 873 char *phy, struct ids_tuple *v)
856{ 874{
857 int err = 0; 875 int err = 0;
858 struct v2p_entry *entry;
859 struct v2p_entry *new; 876 struct v2p_entry *new;
860 struct list_head *head = &(info->v2p_entry_lists);
861 unsigned long flags; 877 unsigned long flags;
862 char *lunp; 878 char *lunp;
863 unsigned long long unpacked_lun; 879 unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
917 spin_lock_irqsave(&info->v2p_lock, flags); 933 spin_lock_irqsave(&info->v2p_lock, flags);
918 934
919 /* Check double assignment to identical virtual ID */ 935 /* Check double assignment to identical virtual ID */
920 list_for_each_entry(entry, head, l) { 936 if (scsiback_chk_translation_entry(info, v)) {
921 if ((entry->v.chn == v->chn) && 937 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
922 (entry->v.tgt == v->tgt) && 938 err = -EEXIST;
923 (entry->v.lun == v->lun)) { 939 goto out;
924 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
925 err = -EEXIST;
926 goto out;
927 }
928
929 } 940 }
930 941
931 /* Create a new translation entry and add to the list */ 942 /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
933 new->v = *v; 944 new->v = *v;
934 new->tpg = tpg; 945 new->tpg = tpg;
935 new->lun = unpacked_lun; 946 new->lun = unpacked_lun;
936 list_add_tail(&new->l, head); 947 list_add_tail(&new->l, &info->v2p_entry_lists);
937 948
938out: 949out:
939 spin_unlock_irqrestore(&info->v2p_lock, flags); 950 spin_unlock_irqrestore(&info->v2p_lock, flags);
940 951
941out_free: 952out_free:
942 mutex_lock(&tpg->tv_tpg_mutex); 953 if (err) {
943 tpg->tv_tpg_fe_count--; 954 mutex_lock(&tpg->tv_tpg_mutex);
944 mutex_unlock(&tpg->tv_tpg_mutex); 955 tpg->tv_tpg_fe_count--;
945 956 mutex_unlock(&tpg->tv_tpg_mutex);
946 if (err)
947 kfree(new); 957 kfree(new);
958 }
948 959
949 return err; 960 return err;
950} 961}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
956} 967}
957 968
958/* 969/*
959 Delete the translation entry specfied 970 Delete the translation entry specified
960*/ 971*/
961static int scsiback_del_translation_entry(struct vscsibk_info *info, 972static int scsiback_del_translation_entry(struct vscsibk_info *info,
962 struct ids_tuple *v) 973 struct ids_tuple *v)
963{ 974{
964 struct v2p_entry *entry; 975 struct v2p_entry *entry;
965 struct list_head *head = &(info->v2p_entry_lists);
966 unsigned long flags; 976 unsigned long flags;
977 int ret = 0;
967 978
968 spin_lock_irqsave(&info->v2p_lock, flags); 979 spin_lock_irqsave(&info->v2p_lock, flags);
969 /* Find out the translation entry specified */ 980 /* Find out the translation entry specified */
970 list_for_each_entry(entry, head, l) { 981 entry = scsiback_chk_translation_entry(info, v);
971 if ((entry->v.chn == v->chn) && 982 if (entry)
972 (entry->v.tgt == v->tgt) && 983 __scsiback_del_translation_entry(entry);
973 (entry->v.lun == v->lun)) { 984 else
974 goto found; 985 ret = -ENOENT;
975 }
976 }
977
978 spin_unlock_irqrestore(&info->v2p_lock, flags);
979 return 1;
980
981found:
982 /* Delete the translation entry specfied */
983 __scsiback_del_translation_entry(entry);
984 986
985 spin_unlock_irqrestore(&info->v2p_lock, flags); 987 spin_unlock_irqrestore(&info->v2p_lock, flags);
986 return 0; 988 return ret;
987} 989}
988 990
989static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, 991static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
990 char *phy, struct ids_tuple *vir, int try) 992 char *phy, struct ids_tuple *vir, int try)
991{ 993{
994 struct v2p_entry *entry;
995 unsigned long flags;
996
997 if (try) {
998 spin_lock_irqsave(&info->v2p_lock, flags);
999 entry = scsiback_chk_translation_entry(info, vir);
1000 spin_unlock_irqrestore(&info->v2p_lock, flags);
1001 if (entry)
1002 return;
1003 }
992 if (!scsiback_add_translation_entry(info, phy, vir)) { 1004 if (!scsiback_add_translation_entry(info, phy, vir)) {
993 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 1005 if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
994 "%d", XenbusStateInitialised)) { 1006 "%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
188 188
189 if (len == 0) 189 if (len == 0)
190 return 0; 190 return 0;
191 if (len > XENSTORE_PAYLOAD_MAX)
192 return -EINVAL;
191 193
192 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); 194 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
193 if (rb == NULL) 195 if (rb == NULL)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0548c53f41d5..22fc7c802d69 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -511,8 +511,6 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, 511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
512 page->index, to); 512 page->index, to);
513 BUG_ON(to > PAGE_CACHE_SIZE); 513 BUG_ON(to > PAGE_CACHE_SIZE);
514 kmap(page);
515 data = page_address(page);
516 bsize = AFFS_SB(sb)->s_data_blksize; 514 bsize = AFFS_SB(sb)->s_data_blksize;
517 tmp = page->index << PAGE_CACHE_SHIFT; 515 tmp = page->index << PAGE_CACHE_SHIFT;
518 bidx = tmp / bsize; 516 bidx = tmp / bsize;
@@ -524,14 +522,15 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
524 return PTR_ERR(bh); 522 return PTR_ERR(bh);
525 tmp = min(bsize - boff, to - pos); 523 tmp = min(bsize - boff, to - pos);
526 BUG_ON(pos + tmp > to || tmp > bsize); 524 BUG_ON(pos + tmp > to || tmp > bsize);
525 data = kmap_atomic(page);
527 memcpy(data + pos, AFFS_DATA(bh) + boff, tmp); 526 memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
527 kunmap_atomic(data);
528 affs_brelse(bh); 528 affs_brelse(bh);
529 bidx++; 529 bidx++;
530 pos += tmp; 530 pos += tmp;
531 boff = 0; 531 boff = 0;
532 } 532 }
533 flush_dcache_page(page); 533 flush_dcache_page(page);
534 kunmap(page);
535 return 0; 534 return 0;
536} 535}
537 536
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 051ea4809c14..7d914c67a9d0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -653,7 +653,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
653 653
654 if ((current->flags & PF_RANDOMIZE) && 654 if ((current->flags & PF_RANDOMIZE) &&
655 !(current->personality & ADDR_NO_RANDOMIZE)) { 655 !(current->personality & ADDR_NO_RANDOMIZE)) {
656 random_variable = (unsigned long) get_random_int(); 656 random_variable = get_random_long();
657 random_variable &= STACK_RND_MASK; 657 random_variable &= STACK_RND_MASK;
658 random_variable <<= PAGE_SHIFT; 658 random_variable <<= PAGE_SHIFT;
659 } 659 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 39b3a174a425..826b164a4b5b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1201,7 +1201,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1201 bdev->bd_disk = disk; 1201 bdev->bd_disk = disk;
1202 bdev->bd_queue = disk->queue; 1202 bdev->bd_queue = disk->queue;
1203 bdev->bd_contains = bdev; 1203 bdev->bd_contains = bdev;
1204 bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0; 1204 if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
1205 bdev->bd_inode->i_flags = S_DAX;
1206 else
1207 bdev->bd_inode->i_flags = 0;
1208
1205 if (!partno) { 1209 if (!partno) {
1206 ret = -ENXIO; 1210 ret = -ENXIO;
1207 bdev->bd_part = disk_get_part(disk, partno); 1211 bdev->bd_part = disk_get_part(disk, partno);
@@ -1693,13 +1697,24 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
1693 return try_to_free_buffers(page); 1697 return try_to_free_buffers(page);
1694} 1698}
1695 1699
1700static int blkdev_writepages(struct address_space *mapping,
1701 struct writeback_control *wbc)
1702{
1703 if (dax_mapping(mapping)) {
1704 struct block_device *bdev = I_BDEV(mapping->host);
1705
1706 return dax_writeback_mapping_range(mapping, bdev, wbc);
1707 }
1708 return generic_writepages(mapping, wbc);
1709}
1710
1696static const struct address_space_operations def_blk_aops = { 1711static const struct address_space_operations def_blk_aops = {
1697 .readpage = blkdev_readpage, 1712 .readpage = blkdev_readpage,
1698 .readpages = blkdev_readpages, 1713 .readpages = blkdev_readpages,
1699 .writepage = blkdev_writepage, 1714 .writepage = blkdev_writepage,
1700 .write_begin = blkdev_write_begin, 1715 .write_begin = blkdev_write_begin,
1701 .write_end = blkdev_write_end, 1716 .write_end = blkdev_write_end,
1702 .writepages = generic_writepages, 1717 .writepages = blkdev_writepages,
1703 .releasepage = blkdev_releasepage, 1718 .releasepage = blkdev_releasepage,
1704 .direct_IO = blkdev_direct_IO, 1719 .direct_IO = blkdev_direct_IO,
1705 .is_dirty_writeback = buffer_check_dirty_writeback, 1720 .is_dirty_writeback = buffer_check_dirty_writeback,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 151b7c71b868..d96f5cf38a2d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7986,6 +7986,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
7986 7986
7987 kfree(dip); 7987 kfree(dip);
7988 7988
7989 dio_bio->bi_error = bio->bi_error;
7989 dio_end_io(dio_bio, bio->bi_error); 7990 dio_end_io(dio_bio, bio->bi_error);
7990 7991
7991 if (io_bio->end_io) 7992 if (io_bio->end_io)
@@ -8040,6 +8041,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
8040 8041
8041 kfree(dip); 8042 kfree(dip);
8042 8043
8044 dio_bio->bi_error = bio->bi_error;
8043 dio_end_io(dio_bio, bio->bi_error); 8045 dio_end_io(dio_bio, bio->bi_error);
8044 bio_put(bio); 8046 bio_put(bio);
8045} 8047}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 7cf8509deda7..2c849b08a91b 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
310 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); 310 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
311 311
312 err = btrfs_insert_fs_root(root->fs_info, root); 312 err = btrfs_insert_fs_root(root->fs_info, root);
313 /*
314 * The root might have been inserted already, as before we look
315 * for orphan roots, log replay might have happened, which
316 * triggers a transaction commit and qgroup accounting, which
317 * in turn reads and inserts fs roots while doing backref
318 * walking.
319 */
320 if (err == -EEXIST)
321 err = 0;
313 if (err) { 322 if (err) {
314 BUG_ON(err == -EEXIST);
315 btrfs_free_fs_root(root); 323 btrfs_free_fs_root(root);
316 break; 324 break;
317 } 325 }
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c22213789090..19adeb0ef82a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1756,6 +1756,10 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
1756 u32 pool; 1756 u32 pool;
1757 int ret, flags; 1757 int ret, flags;
1758 1758
1759 /* does not support pool namespace yet */
1760 if (ci->i_pool_ns_len)
1761 return -EIO;
1762
1759 if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode), 1763 if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
1760 NOPOOLPERM)) 1764 NOPOOLPERM))
1761 return 0; 1765 return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cdbf8cf3d52c..6fe0ad26a7df 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2753,7 +2753,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2753 void *inline_data, int inline_len, 2753 void *inline_data, int inline_len,
2754 struct ceph_buffer *xattr_buf, 2754 struct ceph_buffer *xattr_buf,
2755 struct ceph_mds_session *session, 2755 struct ceph_mds_session *session,
2756 struct ceph_cap *cap, int issued) 2756 struct ceph_cap *cap, int issued,
2757 u32 pool_ns_len)
2757 __releases(ci->i_ceph_lock) 2758 __releases(ci->i_ceph_lock)
2758 __releases(mdsc->snap_rwsem) 2759 __releases(mdsc->snap_rwsem)
2759{ 2760{
@@ -2873,6 +2874,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2873 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) { 2874 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2874 /* file layout may have changed */ 2875 /* file layout may have changed */
2875 ci->i_layout = grant->layout; 2876 ci->i_layout = grant->layout;
2877 ci->i_pool_ns_len = pool_ns_len;
2878
2876 /* size/truncate_seq? */ 2879 /* size/truncate_seq? */
2877 queue_trunc = ceph_fill_file_size(inode, issued, 2880 queue_trunc = ceph_fill_file_size(inode, issued,
2878 le32_to_cpu(grant->truncate_seq), 2881 le32_to_cpu(grant->truncate_seq),
@@ -3411,6 +3414,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3411 u32 inline_len = 0; 3414 u32 inline_len = 0;
3412 void *snaptrace; 3415 void *snaptrace;
3413 size_t snaptrace_len; 3416 size_t snaptrace_len;
3417 u32 pool_ns_len = 0;
3414 void *p, *end; 3418 void *p, *end;
3415 3419
3416 dout("handle_caps from mds%d\n", mds); 3420 dout("handle_caps from mds%d\n", mds);
@@ -3463,6 +3467,21 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3463 p += inline_len; 3467 p += inline_len;
3464 } 3468 }
3465 3469
3470 if (le16_to_cpu(msg->hdr.version) >= 8) {
3471 u64 flush_tid;
3472 u32 caller_uid, caller_gid;
3473 u32 osd_epoch_barrier;
3474 /* version >= 5 */
3475 ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
3476 /* version >= 6 */
3477 ceph_decode_64_safe(&p, end, flush_tid, bad);
3478 /* version >= 7 */
3479 ceph_decode_32_safe(&p, end, caller_uid, bad);
3480 ceph_decode_32_safe(&p, end, caller_gid, bad);
3481 /* version >= 8 */
3482 ceph_decode_32_safe(&p, end, pool_ns_len, bad);
3483 }
3484
3466 /* lookup ino */ 3485 /* lookup ino */
3467 inode = ceph_find_inode(sb, vino); 3486 inode = ceph_find_inode(sb, vino);
3468 ci = ceph_inode(inode); 3487 ci = ceph_inode(inode);
@@ -3518,7 +3537,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3518 &cap, &issued); 3537 &cap, &issued);
3519 handle_cap_grant(mdsc, inode, h, 3538 handle_cap_grant(mdsc, inode, h,
3520 inline_version, inline_data, inline_len, 3539 inline_version, inline_data, inline_len,
3521 msg->middle, session, cap, issued); 3540 msg->middle, session, cap, issued,
3541 pool_ns_len);
3522 if (realm) 3542 if (realm)
3523 ceph_put_snap_realm(mdsc, realm); 3543 ceph_put_snap_realm(mdsc, realm);
3524 goto done_unlocked; 3544 goto done_unlocked;
@@ -3542,7 +3562,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
3542 issued |= __ceph_caps_dirty(ci); 3562 issued |= __ceph_caps_dirty(ci);
3543 handle_cap_grant(mdsc, inode, h, 3563 handle_cap_grant(mdsc, inode, h,
3544 inline_version, inline_data, inline_len, 3564 inline_version, inline_data, inline_len,
3545 msg->middle, session, cap, issued); 3565 msg->middle, session, cap, issued,
3566 pool_ns_len);
3546 goto done_unlocked; 3567 goto done_unlocked;
3547 3568
3548 case CEPH_CAP_OP_FLUSH_ACK: 3569 case CEPH_CAP_OP_FLUSH_ACK:
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index fb4ba2e4e2a5..5849b88bbed3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -396,6 +396,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
396 ci->i_symlink = NULL; 396 ci->i_symlink = NULL;
397 397
398 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); 398 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
399 ci->i_pool_ns_len = 0;
399 400
400 ci->i_fragtree = RB_ROOT; 401 ci->i_fragtree = RB_ROOT;
401 mutex_init(&ci->i_fragtree_mutex); 402 mutex_init(&ci->i_fragtree_mutex);
@@ -756,6 +757,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
756 if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool) 757 if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
757 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; 758 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
758 ci->i_layout = info->layout; 759 ci->i_layout = info->layout;
760 ci->i_pool_ns_len = iinfo->pool_ns_len;
759 761
760 queue_trunc = ceph_fill_file_size(inode, issued, 762 queue_trunc = ceph_fill_file_size(inode, issued,
761 le32_to_cpu(info->truncate_seq), 763 le32_to_cpu(info->truncate_seq),
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e7b130a637f9..911d64d865f1 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -100,6 +100,14 @@ static int parse_reply_info_in(void **p, void *end,
100 } else 100 } else
101 info->inline_version = CEPH_INLINE_NONE; 101 info->inline_version = CEPH_INLINE_NONE;
102 102
103 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
104 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
105 ceph_decode_need(p, end, info->pool_ns_len, bad);
106 *p += info->pool_ns_len;
107 } else {
108 info->pool_ns_len = 0;
109 }
110
103 return 0; 111 return 0;
104bad: 112bad:
105 return err; 113 return err;
@@ -2298,6 +2306,14 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2298 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), 2306 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2299 CEPH_CAP_PIN); 2307 CEPH_CAP_PIN);
2300 2308
2309 /* deny access to directories with pool_ns layouts */
2310 if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
2311 ceph_inode(req->r_inode)->i_pool_ns_len)
2312 return -EIO;
2313 if (req->r_locked_dir &&
2314 ceph_inode(req->r_locked_dir)->i_pool_ns_len)
2315 return -EIO;
2316
2301 /* issue */ 2317 /* issue */
2302 mutex_lock(&mdsc->mutex); 2318 mutex_lock(&mdsc->mutex);
2303 __register_request(mdsc, req, dir); 2319 __register_request(mdsc, req, dir);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ccf11ef0ca87..37712ccffcc6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -44,6 +44,7 @@ struct ceph_mds_reply_info_in {
44 u64 inline_version; 44 u64 inline_version;
45 u32 inline_len; 45 u32 inline_len;
46 char *inline_data; 46 char *inline_data;
47 u32 pool_ns_len;
47}; 48};
48 49
49/* 50/*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 75b7d125ce66..9c458eb52245 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -287,6 +287,7 @@ struct ceph_inode_info {
287 287
288 struct ceph_dir_layout i_dir_layout; 288 struct ceph_dir_layout i_dir_layout;
289 struct ceph_file_layout i_layout; 289 struct ceph_file_layout i_layout;
290 size_t i_pool_ns_len;
290 char *i_symlink; 291 char *i_symlink;
291 292
292 /* for dirs */ 293 /* for dirs */
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7dc886c9a78f..e956cba94338 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -175,7 +175,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
175 * string to the length of the original string to allow for worst case. 175 * string to the length of the original string to allow for worst case.
176 */ 176 */
177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN; 177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
178 mountdata = kzalloc(md_len + 1, GFP_KERNEL); 178 mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
179 if (mountdata == NULL) { 179 if (mountdata == NULL) {
180 rc = -ENOMEM; 180 rc = -ENOMEM;
181 goto compose_mount_options_err; 181 goto compose_mount_options_err;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fce8151..e682b36a210f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
714 714
715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); 715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
716 if (!ses->auth_key.response) { 716 if (!ses->auth_key.response) {
717 rc = ENOMEM; 717 rc = -ENOMEM;
718 ses->auth_key.len = 0; 718 ses->auth_key.len = 0;
719 goto setup_ntlmv2_rsp_ret; 719 goto setup_ntlmv2_rsp_ret;
720 } 720 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c48ca13673e3..2eea40353e60 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1013,7 +1013,6 @@ const struct file_operations cifs_file_strict_ops = {
1013 .llseek = cifs_llseek, 1013 .llseek = cifs_llseek,
1014 .unlocked_ioctl = cifs_ioctl, 1014 .unlocked_ioctl = cifs_ioctl,
1015 .clone_file_range = cifs_clone_file_range, 1015 .clone_file_range = cifs_clone_file_range,
1016 .clone_file_range = cifs_clone_file_range,
1017 .setlease = cifs_setlease, 1016 .setlease = cifs_setlease,
1018 .fallocate = cifs_fallocate, 1017 .fallocate = cifs_fallocate,
1019}; 1018};
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 68c4547528c4..83aac8ba50b0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -31,19 +31,15 @@
31 * so that it will fit. We use hash_64 to convert the value to 31 bits, and 31 * so that it will fit. We use hash_64 to convert the value to 31 bits, and
32 * then add 1, to ensure that we don't end up with a 0 as the value. 32 * then add 1, to ensure that we don't end up with a 0 as the value.
33 */ 33 */
34#if BITS_PER_LONG == 64
35static inline ino_t 34static inline ino_t
36cifs_uniqueid_to_ino_t(u64 fileid) 35cifs_uniqueid_to_ino_t(u64 fileid)
37{ 36{
37 if ((sizeof(ino_t)) < (sizeof(u64)))
38 return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
39
38 return (ino_t)fileid; 40 return (ino_t)fileid;
41
39} 42}
40#else
41static inline ino_t
42cifs_uniqueid_to_ino_t(u64 fileid)
43{
44 return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
45}
46#endif
47 43
48extern struct file_system_type cifs_fs_type; 44extern struct file_system_type cifs_fs_type;
49extern const struct address_space_operations cifs_addr_ops; 45extern const struct address_space_operations cifs_addr_ops;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 90b4f9f7de66..76fcb50295a3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1396,11 +1396,10 @@ openRetry:
1396 * current bigbuf. 1396 * current bigbuf.
1397 */ 1397 */
1398static int 1398static int
1399cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1399discard_remaining_data(struct TCP_Server_Info *server)
1400{ 1400{
1401 unsigned int rfclen = get_rfc1002_length(server->smallbuf); 1401 unsigned int rfclen = get_rfc1002_length(server->smallbuf);
1402 int remaining = rfclen + 4 - server->total_read; 1402 int remaining = rfclen + 4 - server->total_read;
1403 struct cifs_readdata *rdata = mid->callback_data;
1404 1403
1405 while (remaining > 0) { 1404 while (remaining > 0) {
1406 int length; 1405 int length;
@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1414 remaining -= length; 1413 remaining -= length;
1415 } 1414 }
1416 1415
1417 dequeue_mid(mid, rdata->result);
1418 return 0; 1416 return 0;
1419} 1417}
1420 1418
1419static int
1420cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1421{
1422 int length;
1423 struct cifs_readdata *rdata = mid->callback_data;
1424
1425 length = discard_remaining_data(server);
1426 dequeue_mid(mid, rdata->result);
1427 return length;
1428}
1429
1421int 1430int
1422cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1431cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1423{ 1432{
@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1446 return length; 1455 return length;
1447 server->total_read += length; 1456 server->total_read += length;
1448 1457
1458 if (server->ops->is_status_pending &&
1459 server->ops->is_status_pending(buf, server, 0)) {
1460 discard_remaining_data(server);
1461 return -1;
1462 }
1463
1449 /* Was the SMB read successful? */ 1464 /* Was the SMB read successful? */
1450 rdata->result = server->ops->map_error(buf, false); 1465 rdata->result = server->ops->map_error(buf, false);
1451 if (rdata->result != 0) { 1466 if (rdata->result != 0) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4fbd92d2e113..a763cd3d9e7c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2999,8 +2999,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
2999 if (ses_init_buf) { 2999 if (ses_init_buf) {
3000 ses_init_buf->trailer.session_req.called_len = 32; 3000 ses_init_buf->trailer.session_req.called_len = 32;
3001 3001
3002 if (server->server_RFC1001_name && 3002 if (server->server_RFC1001_name[0] != 0)
3003 server->server_RFC1001_name[0] != 0)
3004 rfc1002mangle(ses_init_buf->trailer. 3003 rfc1002mangle(ses_init_buf->trailer.
3005 session_req.called_name, 3004 session_req.called_name,
3006 server->server_RFC1001_name, 3005 server->server_RFC1001_name,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 10f8d5cf5681..42e1f440eb1e 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1106,21 +1106,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
1106{ 1106{
1107 char *data_offset; 1107 char *data_offset;
1108 struct create_context *cc; 1108 struct create_context *cc;
1109 unsigned int next = 0; 1109 unsigned int next;
1110 unsigned int remaining;
1110 char *name; 1111 char *name;
1111 1112
1112 data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); 1113 data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
1114 remaining = le32_to_cpu(rsp->CreateContextsLength);
1113 cc = (struct create_context *)data_offset; 1115 cc = (struct create_context *)data_offset;
1114 do { 1116 while (remaining >= sizeof(struct create_context)) {
1115 cc = (struct create_context *)((char *)cc + next);
1116 name = le16_to_cpu(cc->NameOffset) + (char *)cc; 1117 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
1117 if (le16_to_cpu(cc->NameLength) != 4 || 1118 if (le16_to_cpu(cc->NameLength) == 4 &&
1118 strncmp(name, "RqLs", 4)) { 1119 strncmp(name, "RqLs", 4) == 0)
1119 next = le32_to_cpu(cc->Next); 1120 return server->ops->parse_lease_buf(cc, epoch);
1120 continue; 1121
1121 } 1122 next = le32_to_cpu(cc->Next);
1122 return server->ops->parse_lease_buf(cc, epoch); 1123 if (!next)
1123 } while (next != 0); 1124 break;
1125 remaining -= next;
1126 cc = (struct create_context *)((char *)cc + next);
1127 }
1124 1128
1125 return 0; 1129 return 0;
1126} 1130}
diff --git a/fs/dax.c b/fs/dax.c
index fc2e3141138b..711172450da6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -79,15 +79,14 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
79} 79}
80 80
81/* 81/*
82 * dax_clear_blocks() is called from within transaction context from XFS, 82 * dax_clear_sectors() is called from within transaction context from XFS,
83 * and hence this means the stack from this point must follow GFP_NOFS 83 * and hence this means the stack from this point must follow GFP_NOFS
84 * semantics for all operations. 84 * semantics for all operations.
85 */ 85 */
86int dax_clear_blocks(struct inode *inode, sector_t block, long _size) 86int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
87{ 87{
88 struct block_device *bdev = inode->i_sb->s_bdev;
89 struct blk_dax_ctl dax = { 88 struct blk_dax_ctl dax = {
90 .sector = block << (inode->i_blkbits - 9), 89 .sector = _sector,
91 .size = _size, 90 .size = _size,
92 }; 91 };
93 92
@@ -109,7 +108,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
109 wmb_pmem(); 108 wmb_pmem();
110 return 0; 109 return 0;
111} 110}
112EXPORT_SYMBOL_GPL(dax_clear_blocks); 111EXPORT_SYMBOL_GPL(dax_clear_sectors);
113 112
114/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */ 113/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
115static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first, 114static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
@@ -485,11 +484,10 @@ static int dax_writeback_one(struct block_device *bdev,
485 * end]. This is required by data integrity operations to ensure file data is 484 * end]. This is required by data integrity operations to ensure file data is
486 * on persistent storage prior to completion of the operation. 485 * on persistent storage prior to completion of the operation.
487 */ 486 */
488int dax_writeback_mapping_range(struct address_space *mapping, loff_t start, 487int dax_writeback_mapping_range(struct address_space *mapping,
489 loff_t end) 488 struct block_device *bdev, struct writeback_control *wbc)
490{ 489{
491 struct inode *inode = mapping->host; 490 struct inode *inode = mapping->host;
492 struct block_device *bdev = inode->i_sb->s_bdev;
493 pgoff_t start_index, end_index, pmd_index; 491 pgoff_t start_index, end_index, pmd_index;
494 pgoff_t indices[PAGEVEC_SIZE]; 492 pgoff_t indices[PAGEVEC_SIZE];
495 struct pagevec pvec; 493 struct pagevec pvec;
@@ -500,8 +498,11 @@ int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
500 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 498 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
501 return -EIO; 499 return -EIO;
502 500
503 start_index = start >> PAGE_CACHE_SHIFT; 501 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
504 end_index = end >> PAGE_CACHE_SHIFT; 502 return 0;
503
504 start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
505 end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
505 pmd_index = DAX_PMD_INDEX(start_index); 506 pmd_index = DAX_PMD_INDEX(start_index);
506 507
507 rcu_read_lock(); 508 rcu_read_lock();
diff --git a/fs/dcache.c b/fs/dcache.c
index 92d5140de851..2398f9f94337 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
269 return dentry->d_name.name != dentry->d_iname; 269 return dentry->d_name.name != dentry->d_iname;
270} 270}
271 271
272/*
273 * Make sure other CPUs see the inode attached before the type is set.
274 */
275static inline void __d_set_inode_and_type(struct dentry *dentry, 272static inline void __d_set_inode_and_type(struct dentry *dentry,
276 struct inode *inode, 273 struct inode *inode,
277 unsigned type_flags) 274 unsigned type_flags)
@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
279 unsigned flags; 276 unsigned flags;
280 277
281 dentry->d_inode = inode; 278 dentry->d_inode = inode;
282 smp_wmb();
283 flags = READ_ONCE(dentry->d_flags); 279 flags = READ_ONCE(dentry->d_flags);
284 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 280 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
285 flags |= type_flags; 281 flags |= type_flags;
286 WRITE_ONCE(dentry->d_flags, flags); 282 WRITE_ONCE(dentry->d_flags, flags);
287} 283}
288 284
289/*
290 * Ideally, we want to make sure that other CPUs see the flags cleared before
291 * the inode is detached, but this is really a violation of RCU principles
292 * since the ordering suggests we should always set inode before flags.
293 *
294 * We should instead replace or discard the entire dentry - but that sucks
295 * performancewise on mass deletion/rename.
296 */
297static inline void __d_clear_type_and_inode(struct dentry *dentry) 285static inline void __d_clear_type_and_inode(struct dentry *dentry)
298{ 286{
299 unsigned flags = READ_ONCE(dentry->d_flags); 287 unsigned flags = READ_ONCE(dentry->d_flags);
300 288
301 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 289 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
302 WRITE_ONCE(dentry->d_flags, flags); 290 WRITE_ONCE(dentry->d_flags, flags);
303 smp_wmb();
304 dentry->d_inode = NULL; 291 dentry->d_inode = NULL;
305} 292}
306 293
@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
370 __releases(dentry->d_inode->i_lock) 357 __releases(dentry->d_inode->i_lock)
371{ 358{
372 struct inode *inode = dentry->d_inode; 359 struct inode *inode = dentry->d_inode;
360
361 raw_write_seqcount_begin(&dentry->d_seq);
373 __d_clear_type_and_inode(dentry); 362 __d_clear_type_and_inode(dentry);
374 hlist_del_init(&dentry->d_u.d_alias); 363 hlist_del_init(&dentry->d_u.d_alias);
375 dentry_rcuwalk_invalidate(dentry); 364 raw_write_seqcount_end(&dentry->d_seq);
376 spin_unlock(&dentry->d_lock); 365 spin_unlock(&dentry->d_lock);
377 spin_unlock(&inode->i_lock); 366 spin_unlock(&inode->i_lock);
378 if (!inode->i_nlink) 367 if (!inode->i_nlink)
@@ -1758,8 +1747,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1758 spin_lock(&dentry->d_lock); 1747 spin_lock(&dentry->d_lock);
1759 if (inode) 1748 if (inode)
1760 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1749 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1750 raw_write_seqcount_begin(&dentry->d_seq);
1761 __d_set_inode_and_type(dentry, inode, add_flags); 1751 __d_set_inode_and_type(dentry, inode, add_flags);
1762 dentry_rcuwalk_invalidate(dentry); 1752 raw_write_seqcount_end(&dentry->d_seq);
1763 spin_unlock(&dentry->d_lock); 1753 spin_unlock(&dentry->d_lock);
1764 fsnotify_d_instantiate(dentry, inode); 1754 fsnotify_d_instantiate(dentry, inode);
1765} 1755}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1b2f7ffc8b84..d6a9012d42ad 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
472 dio->io_error = -EIO; 472 dio->io_error = -EIO;
473 473
474 if (dio->is_async && dio->rw == READ && dio->should_dirty) { 474 if (dio->is_async && dio->rw == READ && dio->should_dirty) {
475 bio_check_pages_dirty(bio); /* transfers ownership */
476 err = bio->bi_error; 475 err = bio->bi_error;
476 bio_check_pages_dirty(bio); /* transfers ownership */
477 } else { 477 } else {
478 bio_for_each_segment_all(bvec, bio, i) { 478 bio_for_each_segment_all(bvec, bio, i) {
479 struct page *page = bvec->bv_page; 479 struct page *page = bvec->bv_page;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2c88d683cd91..c1400b109805 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -80,23 +80,6 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
80 return ret; 80 return ret;
81} 81}
82 82
83static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
84{
85 struct inode *inode = file_inode(vma->vm_file);
86 struct ext2_inode_info *ei = EXT2_I(inode);
87 int ret;
88
89 sb_start_pagefault(inode->i_sb);
90 file_update_time(vma->vm_file);
91 down_read(&ei->dax_sem);
92
93 ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
94
95 up_read(&ei->dax_sem);
96 sb_end_pagefault(inode->i_sb);
97 return ret;
98}
99
100static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, 83static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
101 struct vm_fault *vmf) 84 struct vm_fault *vmf)
102{ 85{
@@ -124,7 +107,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
124static const struct vm_operations_struct ext2_dax_vm_ops = { 107static const struct vm_operations_struct ext2_dax_vm_ops = {
125 .fault = ext2_dax_fault, 108 .fault = ext2_dax_fault,
126 .pmd_fault = ext2_dax_pmd_fault, 109 .pmd_fault = ext2_dax_pmd_fault,
127 .page_mkwrite = ext2_dax_mkwrite, 110 .page_mkwrite = ext2_dax_fault,
128 .pfn_mkwrite = ext2_dax_pfn_mkwrite, 111 .pfn_mkwrite = ext2_dax_pfn_mkwrite,
129}; 112};
130 113
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 338eefda70c6..6bd58e6ff038 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -737,8 +737,10 @@ static int ext2_get_blocks(struct inode *inode,
737 * so that it's not found by another thread before it's 737 * so that it's not found by another thread before it's
738 * initialised 738 * initialised
739 */ 739 */
740 err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key), 740 err = dax_clear_sectors(inode->i_sb->s_bdev,
741 1 << inode->i_blkbits); 741 le32_to_cpu(chain[depth-1].key) <<
742 (inode->i_blkbits - 9),
743 1 << inode->i_blkbits);
742 if (err) { 744 if (err) {
743 mutex_unlock(&ei->truncate_mutex); 745 mutex_unlock(&ei->truncate_mutex);
744 goto cleanup; 746 goto cleanup;
@@ -874,6 +876,14 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
874static int 876static int
875ext2_writepages(struct address_space *mapping, struct writeback_control *wbc) 877ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
876{ 878{
879#ifdef CONFIG_FS_DAX
880 if (dax_mapping(mapping)) {
881 return dax_writeback_mapping_range(mapping,
882 mapping->host->i_sb->s_bdev,
883 wbc);
884 }
885#endif
886
877 return mpage_writepages(mapping, wbc, ext2_get_block); 887 return mpage_writepages(mapping, wbc, ext2_get_block);
878} 888}
879 889
@@ -1296,7 +1306,7 @@ void ext2_set_inode_flags(struct inode *inode)
1296 inode->i_flags |= S_NOATIME; 1306 inode->i_flags |= S_NOATIME;
1297 if (flags & EXT2_DIRSYNC_FL) 1307 if (flags & EXT2_DIRSYNC_FL)
1298 inode->i_flags |= S_DIRSYNC; 1308 inode->i_flags |= S_DIRSYNC;
1299 if (test_opt(inode->i_sb, DAX)) 1309 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
1300 inode->i_flags |= S_DAX; 1310 inode->i_flags |= S_DAX;
1301} 1311}
1302 1312
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a60678..fe1f50fe764f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
191 /* If checksum is bad mark all blocks used to prevent allocation 191 /* If checksum is bad mark all blocks used to prevent allocation
192 * essentially implementing a per-group read-only flag. */ 192 * essentially implementing a per-group read-only flag. */
193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
194 ext4_error(sb, "Checksum bad for group %u", block_group);
195 grp = ext4_get_group_info(sb, block_group); 194 grp = ext4_get_group_info(sb, block_group);
196 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 195 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
197 percpu_counter_sub(&sbi->s_freeclusters_counter, 196 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
442 } 441 }
443 ext4_lock_group(sb, block_group); 442 ext4_lock_group(sb, block_group);
444 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 443 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
445
446 err = ext4_init_block_bitmap(sb, bh, block_group, desc); 444 err = ext4_init_block_bitmap(sb, bh, block_group, desc);
447 set_bitmap_uptodate(bh); 445 set_bitmap_uptodate(bh);
448 set_buffer_uptodate(bh); 446 set_buffer_uptodate(bh);
449 ext4_unlock_group(sb, block_group); 447 ext4_unlock_group(sb, block_group);
450 unlock_buffer(bh); 448 unlock_buffer(bh);
451 if (err) 449 if (err) {
450 ext4_error(sb, "Failed to init block bitmap for group "
451 "%u: %d", block_group, err);
452 goto out; 452 goto out;
453 }
453 goto verify; 454 goto verify;
454 } 455 }
455 ext4_unlock_group(sb, block_group); 456 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c8021208a7eb..38f7562489bb 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -467,3 +467,59 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
467 return size; 467 return size;
468 return 0; 468 return 0;
469} 469}
470
471/*
472 * Validate dentries for encrypted directories to make sure we aren't
473 * potentially caching stale data after a key has been added or
474 * removed.
475 */
476static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
477{
478 struct inode *dir = d_inode(dentry->d_parent);
479 struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
480 int dir_has_key, cached_with_key;
481
482 if (!ext4_encrypted_inode(dir))
483 return 0;
484
485 if (ci && ci->ci_keyring_key &&
486 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
487 (1 << KEY_FLAG_REVOKED) |
488 (1 << KEY_FLAG_DEAD))))
489 ci = NULL;
490
491 /* this should eventually be an flag in d_flags */
492 cached_with_key = dentry->d_fsdata != NULL;
493 dir_has_key = (ci != NULL);
494
495 /*
496 * If the dentry was cached without the key, and it is a
497 * negative dentry, it might be a valid name. We can't check
498 * if the key has since been made available due to locking
499 * reasons, so we fail the validation so ext4_lookup() can do
500 * this check.
501 *
502 * We also fail the validation if the dentry was created with
503 * the key present, but we no longer have the key, or vice versa.
504 */
505 if ((!cached_with_key && d_is_negative(dentry)) ||
506 (!cached_with_key && dir_has_key) ||
507 (cached_with_key && !dir_has_key)) {
508#if 0 /* Revalidation debug */
509 char buf[80];
510 char *cp = simple_dname(dentry, buf, sizeof(buf));
511
512 if (IS_ERR(cp))
513 cp = (char *) "???";
514 pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
515 cached_with_key, d_is_negative(dentry),
516 dir_has_key);
517#endif
518 return 0;
519 }
520 return 1;
521}
522
523const struct dentry_operations ext4_encrypted_d_ops = {
524 .d_revalidate = ext4_d_revalidate,
525};
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1d1bca74f844..33f5e2a50cf8 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
111 int dir_has_error = 0; 111 int dir_has_error = 0;
112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; 112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
113 113
114 if (ext4_encrypted_inode(inode)) {
115 err = ext4_get_encryption_info(inode);
116 if (err && err != -ENOKEY)
117 return err;
118 }
119
114 if (is_dx_dir(inode)) { 120 if (is_dx_dir(inode)) {
115 err = ext4_dx_readdir(file, ctx); 121 err = ext4_dx_readdir(file, ctx);
116 if (err != ERR_BAD_DX_DIR) { 122 if (err != ERR_BAD_DX_DIR) {
@@ -157,8 +163,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
157 index, 1); 163 index, 1);
158 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 164 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
159 bh = ext4_bread(NULL, inode, map.m_lblk, 0); 165 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
160 if (IS_ERR(bh)) 166 if (IS_ERR(bh)) {
161 return PTR_ERR(bh); 167 err = PTR_ERR(bh);
168 bh = NULL;
169 goto errout;
170 }
162 } 171 }
163 172
164 if (!bh) { 173 if (!bh) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b285dc8a..157b458a69d4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2302,6 +2302,7 @@ struct page *ext4_encrypt(struct inode *inode,
2302int ext4_decrypt(struct page *page); 2302int ext4_decrypt(struct page *page);
2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
2304 ext4_fsblk_t pblk, ext4_lblk_t len); 2304 ext4_fsblk_t pblk, ext4_lblk_t len);
2305extern const struct dentry_operations ext4_encrypted_d_ops;
2305 2306
2306#ifdef CONFIG_EXT4_FS_ENCRYPTION 2307#ifdef CONFIG_EXT4_FS_ENCRYPTION
2307int ext4_init_crypto(void); 2308int ext4_init_crypto(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ffabaf90aa5..3753ceb0b0dd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3928,7 +3928,7 @@ static int
3928convert_initialized_extent(handle_t *handle, struct inode *inode, 3928convert_initialized_extent(handle_t *handle, struct inode *inode,
3929 struct ext4_map_blocks *map, 3929 struct ext4_map_blocks *map,
3930 struct ext4_ext_path **ppath, int flags, 3930 struct ext4_ext_path **ppath, int flags,
3931 unsigned int allocated, ext4_fsblk_t newblock) 3931 unsigned int allocated)
3932{ 3932{
3933 struct ext4_ext_path *path = *ppath; 3933 struct ext4_ext_path *path = *ppath;
3934 struct ext4_extent *ex; 3934 struct ext4_extent *ex;
@@ -4347,7 +4347,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4348 allocated = convert_initialized_extent( 4348 allocated = convert_initialized_extent(
4349 handle, inode, map, &path, 4349 handle, inode, map, &path,
4350 flags, allocated, newblock); 4350 flags, allocated);
4351 goto out2; 4351 goto out2;
4352 } else if (!ext4_ext_is_unwritten(ex)) 4352 } else if (!ext4_ext_is_unwritten(ex))
4353 goto out; 4353 goto out;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1126436dada1..4cd318f31cbe 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -262,23 +262,8 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
262 return result; 262 return result;
263} 263}
264 264
265static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
266{
267 int err;
268 struct inode *inode = file_inode(vma->vm_file);
269
270 sb_start_pagefault(inode->i_sb);
271 file_update_time(vma->vm_file);
272 down_read(&EXT4_I(inode)->i_mmap_sem);
273 err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
274 up_read(&EXT4_I(inode)->i_mmap_sem);
275 sb_end_pagefault(inode->i_sb);
276
277 return err;
278}
279
280/* 265/*
281 * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite() 266 * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
282 * handler we check for races agaist truncate. Note that since we cycle through 267 * handler we check for races agaist truncate. Note that since we cycle through
283 * i_mmap_sem, we are sure that also any hole punching that began before we 268 * i_mmap_sem, we are sure that also any hole punching that began before we
284 * were called is finished by now and so if it included part of the file we 269 * were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
311static const struct vm_operations_struct ext4_dax_vm_ops = { 296static const struct vm_operations_struct ext4_dax_vm_ops = {
312 .fault = ext4_dax_fault, 297 .fault = ext4_dax_fault,
313 .pmd_fault = ext4_dax_pmd_fault, 298 .pmd_fault = ext4_dax_pmd_fault,
314 .page_mkwrite = ext4_dax_mkwrite, 299 .page_mkwrite = ext4_dax_fault,
315 .pfn_mkwrite = ext4_dax_pfn_mkwrite, 300 .pfn_mkwrite = ext4_dax_pfn_mkwrite,
316}; 301};
317#else 302#else
@@ -350,6 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
350 struct super_block *sb = inode->i_sb; 335 struct super_block *sb = inode->i_sb;
351 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 336 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
352 struct vfsmount *mnt = filp->f_path.mnt; 337 struct vfsmount *mnt = filp->f_path.mnt;
338 struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
353 struct path path; 339 struct path path;
354 char buf[64], *cp; 340 char buf[64], *cp;
355 int ret; 341 int ret;
@@ -393,6 +379,14 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
393 if (ext4_encryption_info(inode) == NULL) 379 if (ext4_encryption_info(inode) == NULL)
394 return -ENOKEY; 380 return -ENOKEY;
395 } 381 }
382 if (ext4_encrypted_inode(dir) &&
383 !ext4_is_child_context_consistent_with_parent(dir, inode)) {
384 ext4_warning(inode->i_sb,
385 "Inconsistent encryption contexts: %lu/%lu\n",
386 (unsigned long) dir->i_ino,
387 (unsigned long) inode->i_ino);
388 return -EPERM;
389 }
396 /* 390 /*
397 * Set up the jbd2_inode if we are opening the inode for 391 * Set up the jbd2_inode if we are opening the inode for
398 * writing and the journal is present 392 * writing and the journal is present
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3fcfd50a2e8a..acc0ad56bf2f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
76 /* If checksum is bad mark all blocks and inodes use to prevent 76 /* If checksum is bad mark all blocks and inodes use to prevent
77 * allocation, essentially implementing a per-group read-only flag. */ 77 * allocation, essentially implementing a per-group read-only flag. */
78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
79 ext4_error(sb, "Checksum bad for group %u", block_group);
80 grp = ext4_get_group_info(sb, block_group); 79 grp = ext4_get_group_info(sb, block_group);
81 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 80 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
82 percpu_counter_sub(&sbi->s_freeclusters_counter, 81 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
191 set_buffer_verified(bh); 190 set_buffer_verified(bh);
192 ext4_unlock_group(sb, block_group); 191 ext4_unlock_group(sb, block_group);
193 unlock_buffer(bh); 192 unlock_buffer(bh);
194 if (err) 193 if (err) {
194 ext4_error(sb, "Failed to init inode bitmap for group "
195 "%u: %d", block_group, err);
195 goto out; 196 goto out;
197 }
196 return bh; 198 return bh;
197 } 199 }
198 ext4_unlock_group(sb, block_group); 200 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 83bc8bfb3bea..aee960b1af34 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -686,6 +686,34 @@ out_sem:
686 return retval; 686 return retval;
687} 687}
688 688
689/*
690 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
691 * we have to be careful as someone else may be manipulating b_state as well.
692 */
693static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
694{
695 unsigned long old_state;
696 unsigned long new_state;
697
698 flags &= EXT4_MAP_FLAGS;
699
700 /* Dummy buffer_head? Set non-atomically. */
701 if (!bh->b_page) {
702 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
703 return;
704 }
705 /*
706 * Someone else may be modifying b_state. Be careful! This is ugly but
707 * once we get rid of using bh as a container for mapping information
708 * to pass to / from get_block functions, this can go away.
709 */
710 do {
711 old_state = READ_ONCE(bh->b_state);
712 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
713 } while (unlikely(
714 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
715}
716
689/* Maximum number of blocks we map for direct IO at once. */ 717/* Maximum number of blocks we map for direct IO at once. */
690#define DIO_MAX_BLOCKS 4096 718#define DIO_MAX_BLOCKS 4096
691 719
@@ -722,7 +750,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
722 ext4_io_end_t *io_end = ext4_inode_aio(inode); 750 ext4_io_end_t *io_end = ext4_inode_aio(inode);
723 751
724 map_bh(bh, inode->i_sb, map.m_pblk); 752 map_bh(bh, inode->i_sb, map.m_pblk);
725 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 753 ext4_update_bh_state(bh, map.m_flags);
726 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) 754 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
727 set_buffer_defer_completion(bh); 755 set_buffer_defer_completion(bh);
728 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 756 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1685 return ret; 1713 return ret;
1686 1714
1687 map_bh(bh, inode->i_sb, map.m_pblk); 1715 map_bh(bh, inode->i_sb, map.m_pblk);
1688 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 1716 ext4_update_bh_state(bh, map.m_flags);
1689 1717
1690 if (buffer_unwritten(bh)) { 1718 if (buffer_unwritten(bh)) {
1691 /* A delayed write to unwritten bh should be marked 1719 /* A delayed write to unwritten bh should be marked
@@ -2450,6 +2478,10 @@ static int ext4_writepages(struct address_space *mapping,
2450 2478
2451 trace_ext4_writepages(inode, wbc); 2479 trace_ext4_writepages(inode, wbc);
2452 2480
2481 if (dax_mapping(mapping))
2482 return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
2483 wbc);
2484
2453 /* 2485 /*
2454 * No pages to write? This is mainly a kludge to avoid starting 2486 * No pages to write? This is mainly a kludge to avoid starting
2455 * a transaction for special inodes like journal inode on last iput() 2487 * a transaction for special inodes like journal inode on last iput()
@@ -3253,29 +3285,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3253 * case, we allocate an io_end structure to hook to the iocb. 3285 * case, we allocate an io_end structure to hook to the iocb.
3254 */ 3286 */
3255 iocb->private = NULL; 3287 iocb->private = NULL;
3256 ext4_inode_aio_set(inode, NULL);
3257 if (!is_sync_kiocb(iocb)) {
3258 io_end = ext4_init_io_end(inode, GFP_NOFS);
3259 if (!io_end) {
3260 ret = -ENOMEM;
3261 goto retake_lock;
3262 }
3263 /*
3264 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3265 */
3266 iocb->private = ext4_get_io_end(io_end);
3267 /*
3268 * we save the io structure for current async direct
3269 * IO, so that later ext4_map_blocks() could flag the
3270 * io structure whether there is a unwritten extents
3271 * needs to be converted when IO is completed.
3272 */
3273 ext4_inode_aio_set(inode, io_end);
3274 }
3275
3276 if (overwrite) { 3288 if (overwrite) {
3277 get_block_func = ext4_get_block_overwrite; 3289 get_block_func = ext4_get_block_overwrite;
3278 } else { 3290 } else {
3291 ext4_inode_aio_set(inode, NULL);
3292 if (!is_sync_kiocb(iocb)) {
3293 io_end = ext4_init_io_end(inode, GFP_NOFS);
3294 if (!io_end) {
3295 ret = -ENOMEM;
3296 goto retake_lock;
3297 }
3298 /*
3299 * Grab reference for DIO. Will be dropped in
3300 * ext4_end_io_dio()
3301 */
3302 iocb->private = ext4_get_io_end(io_end);
3303 /*
3304 * we save the io structure for current async direct
3305 * IO, so that later ext4_map_blocks() could flag the
3306 * io structure whether there is a unwritten extents
3307 * needs to be converted when IO is completed.
3308 */
3309 ext4_inode_aio_set(inode, io_end);
3310 }
3279 get_block_func = ext4_get_block_write; 3311 get_block_func = ext4_get_block_write;
3280 dio_flags = DIO_LOCKING; 3312 dio_flags = DIO_LOCKING;
3281 } 3313 }
@@ -4127,7 +4159,7 @@ void ext4_set_inode_flags(struct inode *inode)
4127 new_fl |= S_NOATIME; 4159 new_fl |= S_NOATIME;
4128 if (flags & EXT4_DIRSYNC_FL) 4160 if (flags & EXT4_DIRSYNC_FL)
4129 new_fl |= S_DIRSYNC; 4161 new_fl |= S_DIRSYNC;
4130 if (test_opt(inode->i_sb, DAX)) 4162 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
4131 new_fl |= S_DAX; 4163 new_fl |= S_DAX;
4132 inode_set_flags(inode, new_fl, 4164 inode_set_flags(inode, new_fl,
4133 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); 4165 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f6c36922c24..eae5917c534e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -208,7 +208,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
208{ 208{
209 struct ext4_inode_info *ei = EXT4_I(inode); 209 struct ext4_inode_info *ei = EXT4_I(inode);
210 handle_t *handle = NULL; 210 handle_t *handle = NULL;
211 int err = EPERM, migrate = 0; 211 int err = -EPERM, migrate = 0;
212 struct ext4_iloc iloc; 212 struct ext4_iloc iloc;
213 unsigned int oldflags, mask, i; 213 unsigned int oldflags, mask, i;
214 unsigned int jflag; 214 unsigned int jflag;
@@ -583,6 +583,11 @@ group_extend_out:
583 "Online defrag not supported with bigalloc"); 583 "Online defrag not supported with bigalloc");
584 err = -EOPNOTSUPP; 584 err = -EOPNOTSUPP;
585 goto mext_out; 585 goto mext_out;
586 } else if (IS_DAX(inode)) {
587 ext4_msg(sb, KERN_ERR,
588 "Online defrag not supported with DAX");
589 err = -EOPNOTSUPP;
590 goto mext_out;
586 } 591 }
587 592
588 err = mnt_want_write_file(filp); 593 err = mnt_want_write_file(filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61eaf74dca37..4424b7bf8ac6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2285,7 +2285,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2285 if (group == 0) 2285 if (group == 0)
2286 seq_puts(seq, "#group: free frags first [" 2286 seq_puts(seq, "#group: free frags first ["
2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]"); 2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2289 2289
2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2291 sizeof(struct ext4_group_info); 2291 sizeof(struct ext4_group_info);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f11709ae6..e032a0423e35 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
265 ext4_lblk_t orig_blk_offset, donor_blk_offset; 265 ext4_lblk_t orig_blk_offset, donor_blk_offset;
266 unsigned long blocksize = orig_inode->i_sb->s_blocksize; 266 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
267 unsigned int tmp_data_size, data_size, replaced_size; 267 unsigned int tmp_data_size, data_size, replaced_size;
268 int err2, jblocks, retries = 0; 268 int i, err2, jblocks, retries = 0;
269 int replaced_count = 0; 269 int replaced_count = 0;
270 int from = data_offset_in_page << orig_inode->i_blkbits; 270 int from = data_offset_in_page << orig_inode->i_blkbits;
271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
272 struct super_block *sb = orig_inode->i_sb; 272 struct super_block *sb = orig_inode->i_sb;
273 struct buffer_head *bh = NULL;
273 274
274 /* 275 /*
275 * It needs twice the amount of ordinary journal buffers because 276 * It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@ data_copy:
380 } 381 }
381 /* Perform all necessary steps similar write_begin()/write_end() 382 /* Perform all necessary steps similar write_begin()/write_end()
382 * but keeping in mind that i_size will not change */ 383 * but keeping in mind that i_size will not change */
383 *err = __block_write_begin(pagep[0], from, replaced_size, 384 if (!page_has_buffers(pagep[0]))
384 ext4_get_block); 385 create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
386 bh = page_buffers(pagep[0]);
387 for (i = 0; i < data_offset_in_page; i++)
388 bh = bh->b_this_page;
389 for (i = 0; i < block_len_in_page; i++) {
390 *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
391 if (*err < 0)
392 break;
393 }
385 if (!*err) 394 if (!*err)
386 *err = block_commit_write(pagep[0], from, from + replaced_size); 395 *err = block_commit_write(pagep[0], from, from + replaced_size);
387 396
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 06574dd77614..48e4b8907826 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1558,6 +1558,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1558 struct ext4_dir_entry_2 *de; 1558 struct ext4_dir_entry_2 *de;
1559 struct buffer_head *bh; 1559 struct buffer_head *bh;
1560 1560
1561 if (ext4_encrypted_inode(dir)) {
1562 int res = ext4_get_encryption_info(dir);
1563
1564 /*
1565 * This should be a properly defined flag for
1566 * dentry->d_flags when we uplift this to the VFS.
1567 * d_fsdata is set to (void *) 1 if if the dentry is
1568 * created while the directory was encrypted and we
1569 * don't have access to the key.
1570 */
1571 dentry->d_fsdata = NULL;
1572 if (ext4_encryption_info(dir))
1573 dentry->d_fsdata = (void *) 1;
1574 d_set_d_op(dentry, &ext4_encrypted_d_ops);
1575 if (res && res != -ENOKEY)
1576 return ERR_PTR(res);
1577 }
1578
1561 if (dentry->d_name.len > EXT4_NAME_LEN) 1579 if (dentry->d_name.len > EXT4_NAME_LEN)
1562 return ERR_PTR(-ENAMETOOLONG); 1580 return ERR_PTR(-ENAMETOOLONG);
1563 1581
@@ -1585,11 +1603,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1585 return ERR_PTR(-EFSCORRUPTED); 1603 return ERR_PTR(-EFSCORRUPTED);
1586 } 1604 }
1587 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && 1605 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
1588 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1606 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
1589 S_ISLNK(inode->i_mode)) &&
1590 !ext4_is_child_context_consistent_with_parent(dir, 1607 !ext4_is_child_context_consistent_with_parent(dir,
1591 inode)) { 1608 inode)) {
1609 int nokey = ext4_encrypted_inode(inode) &&
1610 !ext4_encryption_info(inode);
1611
1592 iput(inode); 1612 iput(inode);
1613 if (nokey)
1614 return ERR_PTR(-ENOKEY);
1593 ext4_warning(inode->i_sb, 1615 ext4_warning(inode->i_sb,
1594 "Inconsistent encryption contexts: %lu/%lu\n", 1616 "Inconsistent encryption contexts: %lu/%lu\n",
1595 (unsigned long) dir->i_ino, 1617 (unsigned long) dir->i_ino,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7acc315..34038e3598d5 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
198 if (flex_gd == NULL) 198 if (flex_gd == NULL)
199 goto out3; 199 goto out3;
200 200
201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) 201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
202 goto out2; 202 goto out2;
203 flex_gd->count = flexbg_size; 203 flex_gd->count = flexbg_size;
204 204
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6915c950e6e8..5c46ed9f3e14 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
223#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1) 223#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
224 /* one round can affect upto 5 slots */ 224 /* one round can affect upto 5 slots */
225 225
226static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
227static struct workqueue_struct *isw_wq;
228
226void __inode_attach_wb(struct inode *inode, struct page *page) 229void __inode_attach_wb(struct inode *inode, struct page *page)
227{ 230{
228 struct backing_dev_info *bdi = inode_to_bdi(inode); 231 struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -424,6 +427,8 @@ skip_switch:
424 427
425 iput(inode); 428 iput(inode);
426 kfree(isw); 429 kfree(isw);
430
431 atomic_dec(&isw_nr_in_flight);
427} 432}
428 433
429static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head) 434static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
@@ -433,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
433 438
434 /* needs to grab bh-unsafe locks, bounce to work item */ 439 /* needs to grab bh-unsafe locks, bounce to work item */
435 INIT_WORK(&isw->work, inode_switch_wbs_work_fn); 440 INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
436 schedule_work(&isw->work); 441 queue_work(isw_wq, &isw->work);
437} 442}
438 443
439/** 444/**
@@ -469,7 +474,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
469 474
470 /* while holding I_WB_SWITCH, no one else can update the association */ 475 /* while holding I_WB_SWITCH, no one else can update the association */
471 spin_lock(&inode->i_lock); 476 spin_lock(&inode->i_lock);
472 if (inode->i_state & (I_WB_SWITCH | I_FREEING) || 477 if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
478 inode->i_state & (I_WB_SWITCH | I_FREEING) ||
473 inode_to_wb(inode) == isw->new_wb) { 479 inode_to_wb(inode) == isw->new_wb) {
474 spin_unlock(&inode->i_lock); 480 spin_unlock(&inode->i_lock);
475 goto out_free; 481 goto out_free;
@@ -480,6 +486,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
480 ihold(inode); 486 ihold(inode);
481 isw->inode = inode; 487 isw->inode = inode;
482 488
489 atomic_inc(&isw_nr_in_flight);
490
483 /* 491 /*
484 * In addition to synchronizing among switchers, I_WB_SWITCH tells 492 * In addition to synchronizing among switchers, I_WB_SWITCH tells
485 * the RCU protected stat update paths to grab the mapping's 493 * the RCU protected stat update paths to grab the mapping's
@@ -840,6 +848,33 @@ restart:
840 wb_put(last_wb); 848 wb_put(last_wb);
841} 849}
842 850
851/**
852 * cgroup_writeback_umount - flush inode wb switches for umount
853 *
854 * This function is called when a super_block is about to be destroyed and
855 * flushes in-flight inode wb switches. An inode wb switch goes through
856 * RCU and then workqueue, so the two need to be flushed in order to ensure
857 * that all previously scheduled switches are finished. As wb switches are
858 * rare occurrences and synchronize_rcu() can take a while, perform
859 * flushing iff wb switches are in flight.
860 */
861void cgroup_writeback_umount(void)
862{
863 if (atomic_read(&isw_nr_in_flight)) {
864 synchronize_rcu();
865 flush_workqueue(isw_wq);
866 }
867}
868
869static int __init cgroup_writeback_init(void)
870{
871 isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
872 if (!isw_wq)
873 return -ENOMEM;
874 return 0;
875}
876fs_initcall(cgroup_writeback_init);
877
843#else /* CONFIG_CGROUP_WRITEBACK */ 878#else /* CONFIG_CGROUP_WRITEBACK */
844 879
845static struct bdi_writeback * 880static struct bdi_writeback *
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 506765afa1a3..bb8d67e2740a 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -376,12 +376,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
376 struct inode *inode = d_inode(dentry); 376 struct inode *inode = d_inode(dentry);
377 dnode_secno dno; 377 dnode_secno dno;
378 int r; 378 int r;
379 int rep = 0;
380 int err; 379 int err;
381 380
382 hpfs_lock(dir->i_sb); 381 hpfs_lock(dir->i_sb);
383 hpfs_adjust_length(name, &len); 382 hpfs_adjust_length(name, &len);
384again: 383
385 err = -ENOENT; 384 err = -ENOENT;
386 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 385 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
387 if (!de) 386 if (!de)
@@ -401,33 +400,9 @@ again:
401 hpfs_error(dir->i_sb, "there was error when removing dirent"); 400 hpfs_error(dir->i_sb, "there was error when removing dirent");
402 err = -EFSERROR; 401 err = -EFSERROR;
403 break; 402 break;
404 case 2: /* no space for deleting, try to truncate file */ 403 case 2: /* no space for deleting */
405
406 err = -ENOSPC; 404 err = -ENOSPC;
407 if (rep++) 405 break;
408 break;
409
410 dentry_unhash(dentry);
411 if (!d_unhashed(dentry)) {
412 hpfs_unlock(dir->i_sb);
413 return -ENOSPC;
414 }
415 if (generic_permission(inode, MAY_WRITE) ||
416 !S_ISREG(inode->i_mode) ||
417 get_write_access(inode)) {
418 d_rehash(dentry);
419 } else {
420 struct iattr newattrs;
421 /*pr_info("truncating file before delete.\n");*/
422 newattrs.ia_size = 0;
423 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
424 err = notify_change(dentry, &newattrs, NULL);
425 put_write_access(inode);
426 if (!err)
427 goto again;
428 }
429 hpfs_unlock(dir->i_sb);
430 return -ENOSPC;
431 default: 406 default:
432 drop_nlink(inode); 407 drop_nlink(inode);
433 err = 0; 408 err = 0;
diff --git a/fs/inode.c b/fs/inode.c
index 9f62db3bcc3e..69b8b526c194 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
154 inode->i_rdev = 0; 154 inode->i_rdev = 0;
155 inode->dirtied_when = 0; 155 inode->dirtied_when = 0;
156 156
157#ifdef CONFIG_CGROUP_WRITEBACK
158 inode->i_wb_frn_winner = 0;
159 inode->i_wb_frn_avg_time = 0;
160 inode->i_wb_frn_history = 0;
161#endif
162
157 if (security_inode_alloc(inode)) 163 if (security_inode_alloc(inode))
158 goto out; 164 goto out;
159 spin_lock_init(&inode->i_lock); 165 spin_lock_init(&inode->i_lock);
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index 3ea36554107f..8918ac905a3b 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -2,10 +2,6 @@
2 JFFS2 LOCKING DOCUMENTATION 2 JFFS2 LOCKING DOCUMENTATION
3 --------------------------- 3 ---------------------------
4 4
5At least theoretically, JFFS2 does not require the Big Kernel Lock
6(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
7code. It has its own locking, as described below.
8
9This document attempts to describe the existing locking rules for 5This document attempts to describe the existing locking rules for
10JFFS2. It is not expected to remain perfectly up to date, but ought to 6JFFS2. It is not expected to remain perfectly up to date, but ought to
11be fairly close. 7be fairly close.
@@ -69,6 +65,7 @@ Ordering constraints:
69 any f->sem held. 65 any f->sem held.
70 2. Never attempt to lock two file mutexes in one thread. 66 2. Never attempt to lock two file mutexes in one thread.
71 No ordering rules have been made for doing so. 67 No ordering rules have been made for doing so.
68 3. Never lock a page cache page with f->sem held.
72 69
73 70
74 erase_completion_lock spinlock 71 erase_completion_lock spinlock
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 0ae91ad6df2d..b288c8ae1236 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -50,7 +50,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
50 50
51 51
52static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, 52static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
53 struct jffs2_inode_cache *ic) 53 struct jffs2_inode_cache *ic,
54 int *dir_hardlinks)
54{ 55{
55 struct jffs2_full_dirent *fd; 56 struct jffs2_full_dirent *fd;
56 57
@@ -69,19 +70,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
69 dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", 70 dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
70 fd->name, fd->ino, ic->ino); 71 fd->name, fd->ino, ic->ino);
71 jffs2_mark_node_obsolete(c, fd->raw); 72 jffs2_mark_node_obsolete(c, fd->raw);
73 /* Clear the ic/raw union so it doesn't cause problems later. */
74 fd->ic = NULL;
72 continue; 75 continue;
73 } 76 }
74 77
78 /* From this point, fd->raw is no longer used so we can set fd->ic */
79 fd->ic = child_ic;
80 child_ic->pino_nlink++;
81 /* If we appear (at this stage) to have hard-linked directories,
82 * set a flag to trigger a scan later */
75 if (fd->type == DT_DIR) { 83 if (fd->type == DT_DIR) {
76 if (child_ic->pino_nlink) { 84 child_ic->flags |= INO_FLAGS_IS_DIR;
77 JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", 85 if (child_ic->pino_nlink > 1)
78 fd->name, fd->ino, ic->ino); 86 *dir_hardlinks = 1;
79 /* TODO: What do we do about it? */ 87 }
80 } else {
81 child_ic->pino_nlink = ic->ino;
82 }
83 } else
84 child_ic->pino_nlink++;
85 88
86 dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); 89 dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
87 /* Can't free scan_dents so far. We might need them in pass 2 */ 90 /* Can't free scan_dents so far. We might need them in pass 2 */
@@ -95,8 +98,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
95*/ 98*/
96static int jffs2_build_filesystem(struct jffs2_sb_info *c) 99static int jffs2_build_filesystem(struct jffs2_sb_info *c)
97{ 100{
98 int ret; 101 int ret, i, dir_hardlinks = 0;
99 int i;
100 struct jffs2_inode_cache *ic; 102 struct jffs2_inode_cache *ic;
101 struct jffs2_full_dirent *fd; 103 struct jffs2_full_dirent *fd;
102 struct jffs2_full_dirent *dead_fds = NULL; 104 struct jffs2_full_dirent *dead_fds = NULL;
@@ -120,7 +122,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
120 /* Now scan the directory tree, increasing nlink according to every dirent found. */ 122 /* Now scan the directory tree, increasing nlink according to every dirent found. */
121 for_each_inode(i, c, ic) { 123 for_each_inode(i, c, ic) {
122 if (ic->scan_dents) { 124 if (ic->scan_dents) {
123 jffs2_build_inode_pass1(c, ic); 125 jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
124 cond_resched(); 126 cond_resched();
125 } 127 }
126 } 128 }
@@ -156,6 +158,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
156 } 158 }
157 159
158 dbg_fsbuild("pass 2a complete\n"); 160 dbg_fsbuild("pass 2a complete\n");
161
162 if (dir_hardlinks) {
163 /* If we detected directory hardlinks earlier, *hopefully*
164 * they are gone now because some of the links were from
165 * dead directories which still had some old dirents lying
166 * around and not yet garbage-collected, but which have
167 * been discarded above. So clear the pino_nlink field
168 * in each directory, so that the final scan below can
169 * print appropriate warnings. */
170 for_each_inode(i, c, ic) {
171 if (ic->flags & INO_FLAGS_IS_DIR)
172 ic->pino_nlink = 0;
173 }
174 }
159 dbg_fsbuild("freeing temporary data structures\n"); 175 dbg_fsbuild("freeing temporary data structures\n");
160 176
161 /* Finally, we can scan again and free the dirent structs */ 177 /* Finally, we can scan again and free the dirent structs */
@@ -163,6 +179,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
163 while(ic->scan_dents) { 179 while(ic->scan_dents) {
164 fd = ic->scan_dents; 180 fd = ic->scan_dents;
165 ic->scan_dents = fd->next; 181 ic->scan_dents = fd->next;
182 /* We do use the pino_nlink field to count nlink of
183 * directories during fs build, so set it to the
184 * parent ino# now. Now that there's hopefully only
185 * one. */
186 if (fd->type == DT_DIR) {
187 if (!fd->ic) {
188 /* We'll have complained about it and marked the coresponding
189 raw node obsolete already. Just skip it. */
190 continue;
191 }
192
193 /* We *have* to have set this in jffs2_build_inode_pass1() */
194 BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
195
196 /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
197 * is set. Otherwise, we know this should never trigger anyway, so
198 * we don't do the check. And ic->pino_nlink still contains the nlink
199 * value (which is 1). */
200 if (dir_hardlinks && fd->ic->pino_nlink) {
201 JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
202 fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
203 /* Should we unlink it from its previous parent? */
204 }
205
206 /* For directories, ic->pino_nlink holds that parent inode # */
207 fd->ic->pino_nlink = ic->ino;
208 }
166 jffs2_free_full_dirent(fd); 209 jffs2_free_full_dirent(fd);
167 } 210 }
168 ic->scan_dents = NULL; 211 ic->scan_dents = NULL;
@@ -241,11 +284,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
241 284
242 /* Reduce nlink of the child. If it's now zero, stick it on the 285 /* Reduce nlink of the child. If it's now zero, stick it on the
243 dead_fds list to be cleaned up later. Else just free the fd */ 286 dead_fds list to be cleaned up later. Else just free the fd */
244 287 child_ic->pino_nlink--;
245 if (fd->type == DT_DIR)
246 child_ic->pino_nlink = 0;
247 else
248 child_ic->pino_nlink--;
249 288
250 if (!child_ic->pino_nlink) { 289 if (!child_ic->pino_nlink) {
251 dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", 290 dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index c5ac5944bc1b..cad86bac3453 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
137 struct page *pg; 137 struct page *pg;
138 struct inode *inode = mapping->host; 138 struct inode *inode = mapping->host;
139 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 139 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
140 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
141 struct jffs2_raw_inode ri;
142 uint32_t alloc_len = 0;
143 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 140 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
144 uint32_t pageofs = index << PAGE_CACHE_SHIFT; 141 uint32_t pageofs = index << PAGE_CACHE_SHIFT;
145 int ret = 0; 142 int ret = 0;
146 143
147 jffs2_dbg(1, "%s()\n", __func__);
148
149 if (pageofs > inode->i_size) {
150 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
151 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
152 if (ret)
153 return ret;
154 }
155
156 mutex_lock(&f->sem);
157 pg = grab_cache_page_write_begin(mapping, index, flags); 144 pg = grab_cache_page_write_begin(mapping, index, flags);
158 if (!pg) { 145 if (!pg)
159 if (alloc_len)
160 jffs2_complete_reservation(c);
161 mutex_unlock(&f->sem);
162 return -ENOMEM; 146 return -ENOMEM;
163 }
164 *pagep = pg; 147 *pagep = pg;
165 148
166 if (alloc_len) { 149 jffs2_dbg(1, "%s()\n", __func__);
150
151 if (pageofs > inode->i_size) {
167 /* Make new hole frag from old EOF to new page */ 152 /* Make new hole frag from old EOF to new page */
153 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
154 struct jffs2_raw_inode ri;
168 struct jffs2_full_dnode *fn; 155 struct jffs2_full_dnode *fn;
156 uint32_t alloc_len;
169 157
170 jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", 158 jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
171 (unsigned int)inode->i_size, pageofs); 159 (unsigned int)inode->i_size, pageofs);
172 160
161 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
162 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
163 if (ret)
164 goto out_page;
165
166 mutex_lock(&f->sem);
173 memset(&ri, 0, sizeof(ri)); 167 memset(&ri, 0, sizeof(ri));
174 168
175 ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 169 ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
196 if (IS_ERR(fn)) { 190 if (IS_ERR(fn)) {
197 ret = PTR_ERR(fn); 191 ret = PTR_ERR(fn);
198 jffs2_complete_reservation(c); 192 jffs2_complete_reservation(c);
193 mutex_unlock(&f->sem);
199 goto out_page; 194 goto out_page;
200 } 195 }
201 ret = jffs2_add_full_dnode_to_inode(c, f, fn); 196 ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
210 jffs2_mark_node_obsolete(c, fn->raw); 205 jffs2_mark_node_obsolete(c, fn->raw);
211 jffs2_free_full_dnode(fn); 206 jffs2_free_full_dnode(fn);
212 jffs2_complete_reservation(c); 207 jffs2_complete_reservation(c);
208 mutex_unlock(&f->sem);
213 goto out_page; 209 goto out_page;
214 } 210 }
215 jffs2_complete_reservation(c); 211 jffs2_complete_reservation(c);
216 inode->i_size = pageofs; 212 inode->i_size = pageofs;
213 mutex_unlock(&f->sem);
217 } 214 }
218 215
219 /* 216 /*
@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
222 * case of a short-copy. 219 * case of a short-copy.
223 */ 220 */
224 if (!PageUptodate(pg)) { 221 if (!PageUptodate(pg)) {
222 mutex_lock(&f->sem);
225 ret = jffs2_do_readpage_nolock(inode, pg); 223 ret = jffs2_do_readpage_nolock(inode, pg);
224 mutex_unlock(&f->sem);
226 if (ret) 225 if (ret)
227 goto out_page; 226 goto out_page;
228 } 227 }
229 mutex_unlock(&f->sem);
230 jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); 228 jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
231 return ret; 229 return ret;
232 230
233out_page: 231out_page:
234 unlock_page(pg); 232 unlock_page(pg);
235 page_cache_release(pg); 233 page_cache_release(pg);
236 mutex_unlock(&f->sem);
237 return ret; 234 return ret;
238} 235}
239 236
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5a2dec2b064c..95d5880a63ee 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1296 BUG_ON(start > orig_start); 1296 BUG_ON(start > orig_start);
1297 } 1297 }
1298 1298
1299 /* First, use readpage() to read the appropriate page into the page cache */ 1299 /* The rules state that we must obtain the page lock *before* f->sem, so
1300 /* Q: What happens if we actually try to GC the _same_ page for which commit_write() 1300 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
1301 * triggered garbage collection in the first place? 1301 * actually going to *change* so we're safe; we only allow reading.
1302 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the 1302 *
1303 * page OK. We'll actually write it out again in commit_write, which is a little 1303 * It is important to note that jffs2_write_begin() will ensure that its
1304 * suboptimal, but at least we're correct. 1304 * page is marked Uptodate before allocating space. That means that if we
1305 */ 1305 * end up here trying to GC the *same* page that jffs2_write_begin() is
1306 * trying to write out, read_cache_page() will not deadlock. */
1307 mutex_unlock(&f->sem);
1306 pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); 1308 pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
1309 mutex_lock(&f->sem);
1307 1310
1308 if (IS_ERR(pg_ptr)) { 1311 if (IS_ERR(pg_ptr)) {
1309 pr_warn("read_cache_page() returned error: %ld\n", 1312 pr_warn("read_cache_page() returned error: %ld\n",
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index fa35ff79ab35..0637271f3770 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
194#define INO_STATE_CLEARING 6 /* In clear_inode() */ 194#define INO_STATE_CLEARING 6 /* In clear_inode() */
195 195
196#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ 196#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
197#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
197 198
198#define RAWNODE_CLASS_INODE_CACHE 0 199#define RAWNODE_CLASS_INODE_CACHE 0
199#define RAWNODE_CLASS_XATTR_DATUM 1 200#define RAWNODE_CLASS_XATTR_DATUM 1
@@ -249,7 +250,10 @@ struct jffs2_readinode_info
249 250
250struct jffs2_full_dirent 251struct jffs2_full_dirent
251{ 252{
252 struct jffs2_raw_node_ref *raw; 253 union {
254 struct jffs2_raw_node_ref *raw;
255 struct jffs2_inode_cache *ic; /* Just during part of build */
256 };
253 struct jffs2_full_dirent *next; 257 struct jffs2_full_dirent *next;
254 uint32_t version; 258 uint32_t version;
255 uint32_t ino; /* == zero for unlink */ 259 uint32_t ino; /* == zero for unlink */
diff --git a/fs/namei.c b/fs/namei.c
index f624d132e01e..9c590e0f66e9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1712,6 +1712,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
1712 return 0; 1712 return 0;
1713 if (!follow) 1713 if (!follow)
1714 return 0; 1714 return 0;
1715 /* make sure that d_is_symlink above matches inode */
1716 if (nd->flags & LOOKUP_RCU) {
1717 if (read_seqcount_retry(&link->dentry->d_seq, seq))
1718 return -ECHILD;
1719 }
1715 return pick_link(nd, link, inode, seq); 1720 return pick_link(nd, link, inode, seq);
1716} 1721}
1717 1722
@@ -1743,11 +1748,11 @@ static int walk_component(struct nameidata *nd, int flags)
1743 if (err < 0) 1748 if (err < 0)
1744 return err; 1749 return err;
1745 1750
1746 inode = d_backing_inode(path.dentry);
1747 seq = 0; /* we are already out of RCU mode */ 1751 seq = 0; /* we are already out of RCU mode */
1748 err = -ENOENT; 1752 err = -ENOENT;
1749 if (d_is_negative(path.dentry)) 1753 if (d_is_negative(path.dentry))
1750 goto out_path_put; 1754 goto out_path_put;
1755 inode = d_backing_inode(path.dentry);
1751 } 1756 }
1752 1757
1753 if (flags & WALK_PUT) 1758 if (flags & WALK_PUT)
@@ -3192,12 +3197,12 @@ retry_lookup:
3192 return error; 3197 return error;
3193 3198
3194 BUG_ON(nd->flags & LOOKUP_RCU); 3199 BUG_ON(nd->flags & LOOKUP_RCU);
3195 inode = d_backing_inode(path.dentry);
3196 seq = 0; /* out of RCU mode, so the value doesn't matter */ 3200 seq = 0; /* out of RCU mode, so the value doesn't matter */
3197 if (unlikely(d_is_negative(path.dentry))) { 3201 if (unlikely(d_is_negative(path.dentry))) {
3198 path_to_nameidata(&path, nd); 3202 path_to_nameidata(&path, nd);
3199 return -ENOENT; 3203 return -ENOENT;
3200 } 3204 }
3205 inode = d_backing_inode(path.dentry);
3201finish_lookup: 3206finish_lookup:
3202 if (nd->depth) 3207 if (nd->depth)
3203 put_link(nd); 3208 put_link(nd);
@@ -3206,11 +3211,6 @@ finish_lookup:
3206 if (unlikely(error)) 3211 if (unlikely(error))
3207 return error; 3212 return error;
3208 3213
3209 if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
3210 path_to_nameidata(&path, nd);
3211 return -ELOOP;
3212 }
3213
3214 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { 3214 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
3215 path_to_nameidata(&path, nd); 3215 path_to_nameidata(&path, nd);
3216 } else { 3216 } else {
@@ -3229,6 +3229,10 @@ finish_open:
3229 return error; 3229 return error;
3230 } 3230 }
3231 audit_inode(nd->name, nd->path.dentry, 0); 3231 audit_inode(nd->name, nd->path.dentry, 0);
3232 if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
3233 error = -ELOOP;
3234 goto out;
3235 }
3232 error = -EISDIR; 3236 error = -EISDIR;
3233 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) 3237 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
3234 goto out; 3238 goto out;
@@ -3273,6 +3277,10 @@ opened:
3273 goto exit_fput; 3277 goto exit_fput;
3274 } 3278 }
3275out: 3279out:
3280 if (unlikely(error > 0)) {
3281 WARN_ON(1);
3282 error = -EINVAL;
3283 }
3276 if (got_write) 3284 if (got_write)
3277 mnt_drop_write(nd->path.mnt); 3285 mnt_drop_write(nd->path.mnt);
3278 path_put(&save_parent); 3286 path_put(&save_parent);
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index c59a59c37f3d..35ab51c04814 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -476,6 +476,7 @@ static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
476 476
477 for (i = 0; i < nr_pages; i++) 477 for (i = 0; i < nr_pages; i++)
478 put_page(arg->layoutupdate_pages[i]); 478 put_page(arg->layoutupdate_pages[i]);
479 vfree(arg->start_p);
479 kfree(arg->layoutupdate_pages); 480 kfree(arg->layoutupdate_pages);
480 } else { 481 } else {
481 put_page(arg->layoutupdate_page); 482 put_page(arg->layoutupdate_page);
@@ -559,10 +560,15 @@ retry:
559 560
560 if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) { 561 if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
561 void *p = start_p, *end = p + arg->layoutupdate_len; 562 void *p = start_p, *end = p + arg->layoutupdate_len;
563 struct page *page = NULL;
562 int i = 0; 564 int i = 0;
563 565
564 for ( ; p < end; p += PAGE_SIZE) 566 arg->start_p = start_p;
565 arg->layoutupdate_pages[i++] = vmalloc_to_page(p); 567 for ( ; p < end; p += PAGE_SIZE) {
568 page = vmalloc_to_page(p);
569 arg->layoutupdate_pages[i++] = page;
570 get_page(page);
571 }
566 } 572 }
567 573
568 dprintk("%s found %zu ranges\n", __func__, count); 574 dprintk("%s found %zu ranges\n", __func__, count);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index bd25dc7077f7..dff83460e5a6 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -16,29 +16,8 @@
16 16
17#define NFSDBG_FACILITY NFSDBG_PROC 17#define NFSDBG_FACILITY NFSDBG_PROC
18 18
19static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
20 fmode_t fmode)
21{
22 struct nfs_open_context *open;
23 struct nfs_lock_context *lock;
24 int ret;
25
26 open = get_nfs_open_context(nfs_file_open_context(file));
27 lock = nfs_get_lock_context(open);
28 if (IS_ERR(lock)) {
29 put_nfs_open_context(open);
30 return PTR_ERR(lock);
31 }
32
33 ret = nfs4_set_rw_stateid(dst, open, lock, fmode);
34
35 nfs_put_lock_context(lock);
36 put_nfs_open_context(open);
37 return ret;
38}
39
40static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, 19static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
41 loff_t offset, loff_t len) 20 struct nfs_lock_context *lock, loff_t offset, loff_t len)
42{ 21{
43 struct inode *inode = file_inode(filep); 22 struct inode *inode = file_inode(filep);
44 struct nfs_server *server = NFS_SERVER(inode); 23 struct nfs_server *server = NFS_SERVER(inode);
@@ -56,7 +35,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
56 msg->rpc_argp = &args; 35 msg->rpc_argp = &args;
57 msg->rpc_resp = &res; 36 msg->rpc_resp = &res;
58 37
59 status = nfs42_set_rw_stateid(&args.falloc_stateid, filep, FMODE_WRITE); 38 status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
39 lock, FMODE_WRITE);
60 if (status) 40 if (status)
61 return status; 41 return status;
62 42
@@ -78,15 +58,26 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
78{ 58{
79 struct nfs_server *server = NFS_SERVER(file_inode(filep)); 59 struct nfs_server *server = NFS_SERVER(file_inode(filep));
80 struct nfs4_exception exception = { }; 60 struct nfs4_exception exception = { };
61 struct nfs_lock_context *lock;
81 int err; 62 int err;
82 63
64 lock = nfs_get_lock_context(nfs_file_open_context(filep));
65 if (IS_ERR(lock))
66 return PTR_ERR(lock);
67
68 exception.inode = file_inode(filep);
69 exception.state = lock->open_context->state;
70
83 do { 71 do {
84 err = _nfs42_proc_fallocate(msg, filep, offset, len); 72 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
85 if (err == -ENOTSUPP) 73 if (err == -ENOTSUPP) {
86 return -EOPNOTSUPP; 74 err = -EOPNOTSUPP;
75 break;
76 }
87 err = nfs4_handle_exception(server, err, &exception); 77 err = nfs4_handle_exception(server, err, &exception);
88 } while (exception.retry); 78 } while (exception.retry);
89 79
80 nfs_put_lock_context(lock);
90 return err; 81 return err;
91} 82}
92 83
@@ -135,7 +126,8 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 return err; 126 return err;
136} 127}
137 128
138static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) 129static loff_t _nfs42_proc_llseek(struct file *filep,
130 struct nfs_lock_context *lock, loff_t offset, int whence)
139{ 131{
140 struct inode *inode = file_inode(filep); 132 struct inode *inode = file_inode(filep);
141 struct nfs42_seek_args args = { 133 struct nfs42_seek_args args = {
@@ -156,7 +148,8 @@ static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
156 if (!nfs_server_capable(inode, NFS_CAP_SEEK)) 148 if (!nfs_server_capable(inode, NFS_CAP_SEEK))
157 return -ENOTSUPP; 149 return -ENOTSUPP;
158 150
159 status = nfs42_set_rw_stateid(&args.sa_stateid, filep, FMODE_READ); 151 status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
152 lock, FMODE_READ);
160 if (status) 153 if (status)
161 return status; 154 return status;
162 155
@@ -175,17 +168,28 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{ 168{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep)); 169 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { }; 170 struct nfs4_exception exception = { };
171 struct nfs_lock_context *lock;
178 loff_t err; 172 loff_t err;
179 173
174 lock = nfs_get_lock_context(nfs_file_open_context(filep));
175 if (IS_ERR(lock))
176 return PTR_ERR(lock);
177
178 exception.inode = file_inode(filep);
179 exception.state = lock->open_context->state;
180
180 do { 181 do {
181 err = _nfs42_proc_llseek(filep, offset, whence); 182 err = _nfs42_proc_llseek(filep, lock, offset, whence);
182 if (err >= 0) 183 if (err >= 0)
183 break; 184 break;
184 if (err == -ENOTSUPP) 185 if (err == -ENOTSUPP) {
185 return -EOPNOTSUPP; 186 err = -EOPNOTSUPP;
187 break;
188 }
186 err = nfs4_handle_exception(server, err, &exception); 189 err = nfs4_handle_exception(server, err, &exception);
187 } while (exception.retry); 190 } while (exception.retry);
188 191
192 nfs_put_lock_context(lock);
189 return err; 193 return err;
190} 194}
191 195
@@ -298,8 +302,9 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
298} 302}
299 303
300static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f, 304static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
301 struct file *dst_f, loff_t src_offset, 305 struct file *dst_f, struct nfs_lock_context *src_lock,
302 loff_t dst_offset, loff_t count) 306 struct nfs_lock_context *dst_lock, loff_t src_offset,
307 loff_t dst_offset, loff_t count)
303{ 308{
304 struct inode *src_inode = file_inode(src_f); 309 struct inode *src_inode = file_inode(src_f);
305 struct inode *dst_inode = file_inode(dst_f); 310 struct inode *dst_inode = file_inode(dst_f);
@@ -320,11 +325,13 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
320 msg->rpc_argp = &args; 325 msg->rpc_argp = &args;
321 msg->rpc_resp = &res; 326 msg->rpc_resp = &res;
322 327
323 status = nfs42_set_rw_stateid(&args.src_stateid, src_f, FMODE_READ); 328 status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
329 src_lock, FMODE_READ);
324 if (status) 330 if (status)
325 return status; 331 return status;
326 332
327 status = nfs42_set_rw_stateid(&args.dst_stateid, dst_f, FMODE_WRITE); 333 status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
334 dst_lock, FMODE_WRITE);
328 if (status) 335 if (status)
329 return status; 336 return status;
330 337
@@ -349,22 +356,48 @@ int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
349 }; 356 };
350 struct inode *inode = file_inode(src_f); 357 struct inode *inode = file_inode(src_f);
351 struct nfs_server *server = NFS_SERVER(file_inode(src_f)); 358 struct nfs_server *server = NFS_SERVER(file_inode(src_f));
352 struct nfs4_exception exception = { }; 359 struct nfs_lock_context *src_lock;
353 int err; 360 struct nfs_lock_context *dst_lock;
361 struct nfs4_exception src_exception = { };
362 struct nfs4_exception dst_exception = { };
363 int err, err2;
354 364
355 if (!nfs_server_capable(inode, NFS_CAP_CLONE)) 365 if (!nfs_server_capable(inode, NFS_CAP_CLONE))
356 return -EOPNOTSUPP; 366 return -EOPNOTSUPP;
357 367
368 src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
369 if (IS_ERR(src_lock))
370 return PTR_ERR(src_lock);
371
372 src_exception.inode = file_inode(src_f);
373 src_exception.state = src_lock->open_context->state;
374
375 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
376 if (IS_ERR(dst_lock)) {
377 err = PTR_ERR(dst_lock);
378 goto out_put_src_lock;
379 }
380
381 dst_exception.inode = file_inode(dst_f);
382 dst_exception.state = dst_lock->open_context->state;
383
358 do { 384 do {
359 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_offset, 385 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
360 dst_offset, count); 386 src_offset, dst_offset, count);
361 if (err == -ENOTSUPP || err == -EOPNOTSUPP) { 387 if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
362 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE; 388 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
363 return -EOPNOTSUPP; 389 err = -EOPNOTSUPP;
390 break;
364 } 391 }
365 err = nfs4_handle_exception(server, err, &exception);
366 } while (exception.retry);
367 392
368 return err; 393 err2 = nfs4_handle_exception(server, err, &src_exception);
394 err = nfs4_handle_exception(server, err, &dst_exception);
395 if (!err)
396 err = err2;
397 } while (src_exception.retry || dst_exception.retry);
369 398
399 nfs_put_lock_context(dst_lock);
400out_put_src_lock:
401 nfs_put_lock_context(src_lock);
402 return err;
370} 403}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4bfc33ad0563..14881594dd07 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2466,9 +2466,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2466 dentry = d_add_unique(dentry, igrab(state->inode)); 2466 dentry = d_add_unique(dentry, igrab(state->inode));
2467 if (dentry == NULL) { 2467 if (dentry == NULL) {
2468 dentry = opendata->dentry; 2468 dentry = opendata->dentry;
2469 } else if (dentry != ctx->dentry) { 2469 } else {
2470 dput(ctx->dentry); 2470 dput(ctx->dentry);
2471 ctx->dentry = dget(dentry); 2471 ctx->dentry = dentry;
2472 } 2472 }
2473 nfs_set_verifier(dentry, 2473 nfs_set_verifier(dentry,
2474 nfs_save_change_attribute(d_inode(opendata->dir))); 2474 nfs_save_change_attribute(d_inode(opendata->dir)));
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 482b6e94bb37..2fa483e6dbe2 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -252,6 +252,27 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
252 } 252 }
253} 253}
254 254
255/*
256 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
257 *
258 * In order to continue using the pnfs_layout_hdr, a full recovery
259 * is required.
260 * Note that caller must hold inode->i_lock.
261 */
262static int
263pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
264 struct list_head *lseg_list)
265{
266 struct pnfs_layout_range range = {
267 .iomode = IOMODE_ANY,
268 .offset = 0,
269 .length = NFS4_MAX_UINT64,
270 };
271
272 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
273 return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
274}
275
255static int 276static int
256pnfs_iomode_to_fail_bit(u32 iomode) 277pnfs_iomode_to_fail_bit(u32 iomode)
257{ 278{
@@ -554,9 +575,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
554 spin_lock(&nfsi->vfs_inode.i_lock); 575 spin_lock(&nfsi->vfs_inode.i_lock);
555 lo = nfsi->layout; 576 lo = nfsi->layout;
556 if (lo) { 577 if (lo) {
557 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
558 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
559 pnfs_get_layout_hdr(lo); 578 pnfs_get_layout_hdr(lo);
579 pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
560 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED); 580 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
561 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED); 581 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
562 spin_unlock(&nfsi->vfs_inode.i_lock); 582 spin_unlock(&nfsi->vfs_inode.i_lock);
@@ -617,11 +637,6 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
617{ 637{
618 struct pnfs_layout_hdr *lo; 638 struct pnfs_layout_hdr *lo;
619 struct inode *inode; 639 struct inode *inode;
620 struct pnfs_layout_range range = {
621 .iomode = IOMODE_ANY,
622 .offset = 0,
623 .length = NFS4_MAX_UINT64,
624 };
625 LIST_HEAD(lseg_list); 640 LIST_HEAD(lseg_list);
626 int ret = 0; 641 int ret = 0;
627 642
@@ -636,11 +651,11 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
636 651
637 spin_lock(&inode->i_lock); 652 spin_lock(&inode->i_lock);
638 list_del_init(&lo->plh_bulk_destroy); 653 list_del_init(&lo->plh_bulk_destroy);
639 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */ 654 if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
640 if (is_bulk_recall) 655 if (is_bulk_recall)
641 set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); 656 set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
642 if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
643 ret = -EAGAIN; 657 ret = -EAGAIN;
658 }
644 spin_unlock(&inode->i_lock); 659 spin_unlock(&inode->i_lock);
645 pnfs_free_lseg_list(&lseg_list); 660 pnfs_free_lseg_list(&lseg_list);
646 /* Free all lsegs that are attached to commit buckets */ 661 /* Free all lsegs that are attached to commit buckets */
@@ -1738,8 +1753,19 @@ pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
1738 if (lo->plh_return_iomode != 0) 1753 if (lo->plh_return_iomode != 0)
1739 iomode = IOMODE_ANY; 1754 iomode = IOMODE_ANY;
1740 lo->plh_return_iomode = iomode; 1755 lo->plh_return_iomode = iomode;
1756 set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
1741} 1757}
1742 1758
1759/**
1760 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
1761 * @lo: pointer to layout header
1762 * @tmp_list: list header to be used with pnfs_free_lseg_list()
1763 * @return_range: describe layout segment ranges to be returned
1764 *
1765 * This function is mainly intended for use by layoutrecall. It attempts
1766 * to free the layout segment immediately, or else to mark it for return
1767 * as soon as its reference count drops to zero.
1768 */
1743int 1769int
1744pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, 1770pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
1745 struct list_head *tmp_list, 1771 struct list_head *tmp_list,
@@ -1762,12 +1788,11 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
1762 lseg, lseg->pls_range.iomode, 1788 lseg, lseg->pls_range.iomode,
1763 lseg->pls_range.offset, 1789 lseg->pls_range.offset,
1764 lseg->pls_range.length); 1790 lseg->pls_range.length);
1791 if (mark_lseg_invalid(lseg, tmp_list))
1792 continue;
1793 remaining++;
1765 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); 1794 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
1766 pnfs_set_plh_return_iomode(lo, return_range->iomode); 1795 pnfs_set_plh_return_iomode(lo, return_range->iomode);
1767 if (!mark_lseg_invalid(lseg, tmp_list))
1768 remaining++;
1769 set_bit(NFS_LAYOUT_RETURN_REQUESTED,
1770 &lo->plh_flags);
1771 } 1796 }
1772 return remaining; 1797 return remaining;
1773} 1798}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf114676e..7115c5d7d373 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
91#include <linux/fsnotify_backend.h> 91#include <linux/fsnotify_backend.h>
92#include "fsnotify.h" 92#include "fsnotify.h"
93 93
94#define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */
95
94struct srcu_struct fsnotify_mark_srcu; 96struct srcu_struct fsnotify_mark_srcu;
97static DEFINE_SPINLOCK(destroy_lock);
98static LIST_HEAD(destroy_list);
99
100static void fsnotify_mark_destroy(struct work_struct *work);
101static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
95 102
96void fsnotify_get_mark(struct fsnotify_mark *mark) 103void fsnotify_get_mark(struct fsnotify_mark *mark)
97{ 104{
@@ -165,19 +172,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
165 atomic_dec(&group->num_marks); 172 atomic_dec(&group->num_marks);
166} 173}
167 174
168static void
169fsnotify_mark_free_rcu(struct rcu_head *rcu)
170{
171 struct fsnotify_mark *mark;
172
173 mark = container_of(rcu, struct fsnotify_mark, g_rcu);
174 fsnotify_put_mark(mark);
175}
176
177/* 175/*
178 * Free fsnotify mark. The freeing is actually happening from a call_srcu 176 * Free fsnotify mark. The freeing is actually happening from a kthread which
179 * callback. Caller must have a reference to the mark or be protected by 177 * first waits for srcu period end. Caller must have a reference to the mark
180 * fsnotify_mark_srcu. 178 * or be protected by fsnotify_mark_srcu.
181 */ 179 */
182void fsnotify_free_mark(struct fsnotify_mark *mark) 180void fsnotify_free_mark(struct fsnotify_mark *mark)
183{ 181{
@@ -192,7 +190,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
192 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; 190 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
193 spin_unlock(&mark->lock); 191 spin_unlock(&mark->lock);
194 192
195 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 193 spin_lock(&destroy_lock);
194 list_add(&mark->g_list, &destroy_list);
195 spin_unlock(&destroy_lock);
196 queue_delayed_work(system_unbound_wq, &reaper_work,
197 FSNOTIFY_REAPER_DELAY);
196 198
197 /* 199 /*
198 * Some groups like to know that marks are being freed. This is a 200 * Some groups like to know that marks are being freed. This is a
@@ -388,7 +390,12 @@ err:
388 390
389 spin_unlock(&mark->lock); 391 spin_unlock(&mark->lock);
390 392
391 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 393 spin_lock(&destroy_lock);
394 list_add(&mark->g_list, &destroy_list);
395 spin_unlock(&destroy_lock);
396 queue_delayed_work(system_unbound_wq, &reaper_work,
397 FSNOTIFY_REAPER_DELAY);
398
392 return ret; 399 return ret;
393} 400}
394 401
@@ -491,3 +498,21 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
491 atomic_set(&mark->refcnt, 1); 498 atomic_set(&mark->refcnt, 1);
492 mark->free_mark = free_mark; 499 mark->free_mark = free_mark;
493} 500}
501
502static void fsnotify_mark_destroy(struct work_struct *work)
503{
504 struct fsnotify_mark *mark, *next;
505 struct list_head private_destroy_list;
506
507 spin_lock(&destroy_lock);
508 /* exchange the list head */
509 list_replace_init(&destroy_list, &private_destroy_list);
510 spin_unlock(&destroy_lock);
511
512 synchronize_srcu(&fsnotify_mark_srcu);
513
514 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
515 list_del_init(&mark->g_list);
516 fsnotify_put_mark(mark);
517 }
518}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 794fd1587f34..cda0361e95a4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -956,6 +956,7 @@ clean_orphan:
956 tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 956 tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
957 update_isize, end); 957 update_isize, end);
958 if (tmp_ret < 0) { 958 if (tmp_ret < 0) {
959 ocfs2_inode_unlock(inode, 1);
959 ret = tmp_ret; 960 ret = tmp_ret;
960 mlog_errno(ret); 961 mlog_errno(ret);
961 brelse(di_bh); 962 brelse(di_bh);
diff --git a/fs/pnode.c b/fs/pnode.c
index 6367e1e435c6..c524fdddc7fb 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -202,6 +202,11 @@ static struct mount *last_dest, *last_source, *dest_master;
202static struct mountpoint *mp; 202static struct mountpoint *mp;
203static struct hlist_head *list; 203static struct hlist_head *list;
204 204
205static inline bool peers(struct mount *m1, struct mount *m2)
206{
207 return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
208}
209
205static int propagate_one(struct mount *m) 210static int propagate_one(struct mount *m)
206{ 211{
207 struct mount *child; 212 struct mount *child;
@@ -212,7 +217,7 @@ static int propagate_one(struct mount *m)
212 /* skip if mountpoint isn't covered by it */ 217 /* skip if mountpoint isn't covered by it */
213 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) 218 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
214 return 0; 219 return 0;
215 if (m->mnt_group_id == last_dest->mnt_group_id) { 220 if (peers(m, last_dest)) {
216 type = CL_MAKE_SHARED; 221 type = CL_MAKE_SHARED;
217 } else { 222 } else {
218 struct mount *n, *p; 223 struct mount *n, *p;
@@ -223,7 +228,7 @@ static int propagate_one(struct mount *m)
223 last_source = last_source->mnt_master; 228 last_source = last_source->mnt_master;
224 last_dest = last_source->mnt_parent; 229 last_dest = last_source->mnt_parent;
225 } 230 }
226 if (n->mnt_group_id != last_dest->mnt_group_id) { 231 if (!peers(n, last_dest)) {
227 last_source = last_source->mnt_master; 232 last_source = last_source->mnt_master;
228 last_dest = last_source->mnt_parent; 233 last_dest = last_source->mnt_parent;
229 } 234 }
diff --git a/fs/read_write.c b/fs/read_write.c
index 324ec271cc4e..dadf24e5c95b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
17#include <linux/splice.h> 17#include <linux/splice.h>
18#include <linux/compat.h> 18#include <linux/compat.h>
19#include <linux/mount.h> 19#include <linux/mount.h>
20#include <linux/fs.h>
20#include "internal.h" 21#include "internal.h"
21 22
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -183,7 +184,7 @@ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
183 switch (whence) { 184 switch (whence) {
184 case SEEK_SET: case SEEK_CUR: 185 case SEEK_SET: case SEEK_CUR:
185 return generic_file_llseek_size(file, offset, whence, 186 return generic_file_llseek_size(file, offset, whence,
186 ~0ULL, 0); 187 OFFSET_MAX, 0);
187 default: 188 default:
188 return -EINVAL; 189 return -EINVAL;
189 } 190 }
@@ -1532,10 +1533,12 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1532 1533
1533 if (!(file_in->f_mode & FMODE_READ) || 1534 if (!(file_in->f_mode & FMODE_READ) ||
1534 !(file_out->f_mode & FMODE_WRITE) || 1535 !(file_out->f_mode & FMODE_WRITE) ||
1535 (file_out->f_flags & O_APPEND) || 1536 (file_out->f_flags & O_APPEND))
1536 !file_in->f_op->clone_file_range)
1537 return -EBADF; 1537 return -EBADF;
1538 1538
1539 if (!file_in->f_op->clone_file_range)
1540 return -EOPNOTSUPP;
1541
1539 ret = clone_verify_area(file_in, pos_in, len, false); 1542 ret = clone_verify_area(file_in, pos_in, len, false);
1540 if (ret) 1543 if (ret)
1541 return ret; 1544 return ret;
diff --git a/fs/super.c b/fs/super.c
index 1182af8fd5ff..74914b1bae70 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
415 sb->s_flags &= ~MS_ACTIVE; 415 sb->s_flags &= ~MS_ACTIVE;
416 416
417 fsnotify_unmount_inodes(sb); 417 fsnotify_unmount_inodes(sb);
418 cgroup_writeback_umount();
418 419
419 evict_inodes(sb); 420 evict_inodes(sb);
420 421
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 50311703135b..66cdb44616d5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -287,6 +287,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
287 goto out; 287 goto out;
288 288
289 /* 289 /*
290 * We don't do userfault handling for the final child pid update.
291 */
292 if (current->flags & PF_EXITING)
293 goto out;
294
295 /*
290 * Check that we can return VM_FAULT_RETRY. 296 * Check that we can return VM_FAULT_RETRY.
291 * 297 *
292 * NOTE: it should become possible to return VM_FAULT_RETRY 298 * NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xattr.c b/fs/xattr.c
index 07d0e47f6a7f..4861322e28e8 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -940,7 +940,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
940 bool trusted = capable(CAP_SYS_ADMIN); 940 bool trusted = capable(CAP_SYS_ADMIN);
941 struct simple_xattr *xattr; 941 struct simple_xattr *xattr;
942 ssize_t remaining_size = size; 942 ssize_t remaining_size = size;
943 int err; 943 int err = 0;
944 944
945#ifdef CONFIG_FS_POSIX_ACL 945#ifdef CONFIG_FS_POSIX_ACL
946 if (inode->i_acl) { 946 if (inode->i_acl) {
@@ -965,11 +965,11 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
965 965
966 err = xattr_list_one(&buffer, &remaining_size, xattr->name); 966 err = xattr_list_one(&buffer, &remaining_size, xattr->name);
967 if (err) 967 if (err)
968 return err; 968 break;
969 } 969 }
970 spin_unlock(&xattrs->lock); 970 spin_unlock(&xattrs->lock);
971 971
972 return size - remaining_size; 972 return err ? err : size - remaining_size;
973} 973}
974 974
975/* 975/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 379c089fb051..a9ebabfe7587 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -55,7 +55,7 @@ xfs_count_page_state(
55 } while ((bh = bh->b_this_page) != head); 55 } while ((bh = bh->b_this_page) != head);
56} 56}
57 57
58STATIC struct block_device * 58struct block_device *
59xfs_find_bdev_for_inode( 59xfs_find_bdev_for_inode(
60 struct inode *inode) 60 struct inode *inode)
61{ 61{
@@ -1208,6 +1208,10 @@ xfs_vm_writepages(
1208 struct writeback_control *wbc) 1208 struct writeback_control *wbc)
1209{ 1209{
1210 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); 1210 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1211 if (dax_mapping(mapping))
1212 return dax_writeback_mapping_range(mapping,
1213 xfs_find_bdev_for_inode(mapping->host), wbc);
1214
1211 return generic_writepages(mapping, wbc); 1215 return generic_writepages(mapping, wbc);
1212} 1216}
1213 1217
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f6ffc9ae5ceb..a4343c63fb38 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,5 +62,6 @@ int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
62 struct buffer_head *map_bh, int create); 62 struct buffer_head *map_bh, int create);
63 63
64extern void xfs_count_page_state(struct page *, int *, int *); 64extern void xfs_count_page_state(struct page *, int *, int *);
65extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
65 66
66#endif /* __XFS_AOPS_H__ */ 67#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 45ec9e40150c..6c876012b2e5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -75,7 +75,8 @@ xfs_zero_extent(
75 ssize_t size = XFS_FSB_TO_B(mp, count_fsb); 75 ssize_t size = XFS_FSB_TO_B(mp, count_fsb);
76 76
77 if (IS_DAX(VFS_I(ip))) 77 if (IS_DAX(VFS_I(ip)))
78 return dax_clear_blocks(VFS_I(ip), block, size); 78 return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
79 sector, size);
79 80
80 /* 81 /*
81 * let the block layer decide on the fastest method of 82 * let the block layer decide on the fastest method of
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0b3c0d39ef75..c370b261c720 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
239 pmd_t *pmdp); 239 pmd_t *pmdp);
240#endif 240#endif
241 241
242#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
243static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
244 unsigned long address, pmd_t *pmdp)
245{
246
247}
248#endif
249
242#ifndef __HAVE_ARCH_PTE_SAME 250#ifndef __HAVE_ARCH_PTE_SAME
243static inline int pte_same(pte_t pte_a, pte_t pte_b) 251static inline int pte_same(pte_t pte_a, pte_t pte_b)
244{ 252{
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c65a212db77e..c5b4b81a831b 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1166,6 +1166,7 @@ struct drm_connector {
1166 struct drm_mode_object base; 1166 struct drm_mode_object base;
1167 1167
1168 char *name; 1168 char *name;
1169 int connector_id;
1169 int connector_type; 1170 int connector_type;
1170 int connector_type_id; 1171 int connector_type_id;
1171 bool interlace_allowed; 1172 bool interlace_allowed;
@@ -2047,6 +2048,7 @@ struct drm_mode_config {
2047 struct list_head fb_list; 2048 struct list_head fb_list;
2048 2049
2049 int num_connector; 2050 int num_connector;
2051 struct ida connector_ida;
2050 struct list_head connector_list; 2052 struct list_head connector_list;
2051 int num_encoder; 2053 int num_encoder;
2052 struct list_head encoder_list; 2054 struct list_head encoder_list;
@@ -2200,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector);
2200void drm_connector_unregister(struct drm_connector *connector); 2202void drm_connector_unregister(struct drm_connector *connector);
2201 2203
2202extern void drm_connector_cleanup(struct drm_connector *connector); 2204extern void drm_connector_cleanup(struct drm_connector *connector);
2203extern unsigned int drm_connector_index(struct drm_connector *connector); 2205static inline unsigned drm_connector_index(struct drm_connector *connector)
2206{
2207 return connector->connector_id;
2208}
2209
2204/* helper to unplug all connectors from sysfs for device */ 2210/* helper to unplug all connectors from sysfs for device */
2205extern void drm_connector_unplug_all(struct drm_device *dev); 2211extern void drm_connector_unplug_all(struct drm_device *dev);
2206 2212
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6f45aea49e4f..0a05b0d36ae7 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -126,7 +126,7 @@
126/* 104 */ 126/* 104 */
127/* 105 */ 127/* 105 */
128#define TEGRA210_CLK_D_AUDIO 106 128#define TEGRA210_CLK_D_AUDIO 106
129/* 107 ( affects abp -> ape) */ 129#define TEGRA210_CLK_APB2APE 107
130/* 108 */ 130/* 108 */
131/* 109 */ 131/* 109 */
132/* 110 */ 132/* 110 */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2992bfa1706..c1a2f345cbe6 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
487}; 487};
488 488
489enum ata_ioctls { 489enum ata_ioctls {
490 ATA_IOC_GET_IO32 = 0x309, 490 ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
491 ATA_IOC_SET_IO32 = 0x324, 491 ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
492}; 492};
493 493
494/* core structures */ 494/* core structures */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5349e6816cbb..cb6888824108 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
310 bio->bi_flags &= ~(1U << bit); 310 bio->bi_flags &= ~(1U << bit);
311} 311}
312 312
313static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
314{
315 *bv = bio_iovec(bio);
316}
317
318static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
319{
320 struct bvec_iter iter = bio->bi_iter;
321 int idx;
322
323 if (!bio_flagged(bio, BIO_CLONED)) {
324 *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
325 return;
326 }
327
328 if (unlikely(!bio_multiple_segments(bio))) {
329 *bv = bio_iovec(bio);
330 return;
331 }
332
333 bio_advance_iter(bio, &iter, iter.bi_size);
334
335 if (!iter.bi_bvec_done)
336 idx = iter.bi_idx - 1;
337 else /* in the middle of bvec */
338 idx = iter.bi_idx;
339
340 *bv = bio->bi_io_vec[idx];
341
342 /*
343 * iter.bi_bvec_done records actual length of the last bvec
344 * if this bio ends in the middle of one io vector
345 */
346 if (iter.bi_bvec_done)
347 bv->bv_len = iter.bi_bvec_done;
348}
349
313enum bip_flags { 350enum bip_flags {
314 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 351 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
315 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ 352 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4571ef1a12a9..413c84fbc4ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -895,7 +895,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
895{ 895{
896 struct request_queue *q = rq->q; 896 struct request_queue *q = rq->q;
897 897
898 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 898 if (unlikely(rq->cmd_type != REQ_TYPE_FS))
899 return q->limits.max_hw_sectors; 899 return q->limits.max_hw_sectors;
900 900
901 if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) 901 if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
@@ -1372,6 +1372,13 @@ static inline void put_dev_sector(Sector p)
1372 page_cache_release(p.v); 1372 page_cache_release(p.v);
1373} 1373}
1374 1374
1375static inline bool __bvec_gap_to_prev(struct request_queue *q,
1376 struct bio_vec *bprv, unsigned int offset)
1377{
1378 return offset ||
1379 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1380}
1381
1375/* 1382/*
1376 * Check if adding a bio_vec after bprv with offset would create a gap in 1383 * Check if adding a bio_vec after bprv with offset would create a gap in
1377 * the SG list. Most drivers don't care about this, but some do. 1384 * the SG list. Most drivers don't care about this, but some do.
@@ -1381,18 +1388,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
1381{ 1388{
1382 if (!queue_virt_boundary(q)) 1389 if (!queue_virt_boundary(q))
1383 return false; 1390 return false;
1384 return offset || 1391 return __bvec_gap_to_prev(q, bprv, offset);
1385 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1386} 1392}
1387 1393
1388static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 1394static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1389 struct bio *next) 1395 struct bio *next)
1390{ 1396{
1391 if (!bio_has_data(prev)) 1397 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1392 return false; 1398 struct bio_vec pb, nb;
1399
1400 bio_get_last_bvec(prev, &pb);
1401 bio_get_first_bvec(next, &nb);
1393 1402
1394 return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], 1403 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1395 next->bi_io_vec[0].bv_offset); 1404 }
1405
1406 return false;
1396} 1407}
1397 1408
1398static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 1409static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index c1ef6f14e7be..15151f3c4120 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -75,6 +75,7 @@
75#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */ 75#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
76// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5 76// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
77#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */ 77#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
78#define CEPH_FEATURE_FS_FILE_LAYOUT_V2 (1ULL<<58) /* file_layout_t */
78 79
79/* 80/*
80 * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature 81 * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c49ccd..48f5aab117ae 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
144 */ 144 */
145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) 145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
146#define __trace_if(cond) \ 146#define __trace_if(cond) \
147 if (__builtin_constant_p((cond)) ? !!(cond) : \ 147 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
148 ({ \ 148 ({ \
149 int ______r; \ 149 int ______r; \
150 static struct ftrace_branch_data \ 150 static struct ftrace_branch_data \
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 818e45078929..636dd59ab505 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,7 +7,7 @@
7 7
8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, 8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
9 get_block_t, dio_iodone_t, int flags); 9 get_block_t, dio_iodone_t, int flags);
10int dax_clear_blocks(struct inode *, sector_t block, long size); 10int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); 11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
12int dax_truncate_page(struct inode *, loff_t from, get_block_t); 12int dax_truncate_page(struct inode *, loff_t from, get_block_t);
13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, 13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping)
52{ 52{
53 return mapping->host && IS_DAX(mapping->host); 53 return mapping->host && IS_DAX(mapping->host);
54} 54}
55int dax_writeback_mapping_range(struct address_space *mapping, loff_t start, 55
56 loff_t end); 56struct writeback_control;
57int dax_writeback_mapping_range(struct address_space *mapping,
58 struct block_device *bdev, struct writeback_control *wbc);
57#endif 59#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7781ce110503..c4b5f4b3f8f8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
409 */ 409 */
410static inline unsigned __d_entry_type(const struct dentry *dentry) 410static inline unsigned __d_entry_type(const struct dentry *dentry)
411{ 411{
412 unsigned type = READ_ONCE(dentry->d_flags); 412 return dentry->d_flags & DCACHE_ENTRY_TYPE;
413 smp_rmb();
414 return type & DCACHE_ENTRY_TYPE;
415} 413}
416 414
417static inline bool d_is_miss(const struct dentry *dentry) 415static inline bool d_is_miss(const struct dentry *dentry)
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f45aa4..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@ struct fsnotify_mark {
220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing 220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing
221 * mark into destroy_list when it's waiting for the end of SRCU period 221 * mark into destroy_list when it's waiting for the end of SRCU period
222 * before it can be freed. [group->mark_mutex] */ 222 * before it can be freed. [group->mark_mutex] */
223 union { 223 struct list_head g_list;
224 struct list_head g_list;
225 struct rcu_head g_rcu;
226 };
227 /* Protects inode / mnt pointers, flags, masks */ 224 /* Protects inode / mnt pointers, flags, masks */
228 spinlock_t lock; 225 spinlock_t lock;
229 /* List of marks for inode / vfsmount [obj_lock] */ 226 /* List of marks for inode / vfsmount [obj_lock] */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 81de7123959d..c2b340e23f62 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,6 +603,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
603 603
604extern int skip_trace(unsigned long ip); 604extern int skip_trace(unsigned long ip);
605extern void ftrace_module_init(struct module *mod); 605extern void ftrace_module_init(struct module *mod);
606extern void ftrace_module_enable(struct module *mod);
606extern void ftrace_release_mod(struct module *mod); 607extern void ftrace_release_mod(struct module *mod);
607 608
608extern void ftrace_disable_daemon(void); 609extern void ftrace_disable_daemon(void);
@@ -612,8 +613,9 @@ static inline int skip_trace(unsigned long ip) { return 0; }
612static inline int ftrace_force_update(void) { return 0; } 613static inline int ftrace_force_update(void) { return 0; }
613static inline void ftrace_disable_daemon(void) { } 614static inline void ftrace_disable_daemon(void) { }
614static inline void ftrace_enable_daemon(void) { } 615static inline void ftrace_enable_daemon(void) { }
615static inline void ftrace_release_mod(struct module *mod) {} 616static inline void ftrace_module_init(struct module *mod) { }
616static inline void ftrace_module_init(struct module *mod) {} 617static inline void ftrace_module_enable(struct module *mod) { }
618static inline void ftrace_release_mod(struct module *mod) { }
617static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 619static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
618{ 620{
619 return -EINVAL; 621 return -EINVAL;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273ca4873..2d9b650047a5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
235/* low 64 bit */ 235/* low 64 bit */
236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) 236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
237 237
238/* PRS_REG */
239#define DMA_PRS_PPR ((u32)1)
240
238#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 241#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
239do { \ 242do { \
240 cycles_t start_time = get_cycles(); \ 243 cycles_t start_time = get_cycles(); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bec2abbd7ab2..2c4ebef79d0c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -720,7 +720,7 @@ struct ata_device {
720 union { 720 union {
721 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 721 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
722 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ 722 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
723 }; 723 } ____cacheline_aligned;
724 724
725 /* DEVSLP Timing Variables from Identify Device Data Log */ 725 /* DEVSLP Timing Variables from Identify Device Data Log */
726 u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; 726 u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index bed40dff0e86..141ffdd59960 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -26,9 +26,8 @@ enum {
26 26
27 /* need to set a limit somewhere, but yes, this is likely overkill */ 27 /* need to set a limit somewhere, but yes, this is likely overkill */
28 ND_IOCTL_MAX_BUFLEN = SZ_4M, 28 ND_IOCTL_MAX_BUFLEN = SZ_4M,
29 ND_CMD_MAX_ELEM = 4, 29 ND_CMD_MAX_ELEM = 5,
30 ND_CMD_MAX_ENVELOPE = 16, 30 ND_CMD_MAX_ENVELOPE = 16,
31 ND_CMD_ARS_STATUS_MAX = SZ_4K,
32 ND_MAX_MAPPINGS = 32, 31 ND_MAX_MAPPINGS = 32,
33 32
34 /* region flag indicating to direct-map persistent memory by default */ 33 /* region flag indicating to direct-map persistent memory by default */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d6750111e48e..2190419bdf0a 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -135,6 +135,10 @@ enum {
135 /* Memory types */ 135 /* Memory types */
136 NVM_ID_FMTYPE_SLC = 0, 136 NVM_ID_FMTYPE_SLC = 0,
137 NVM_ID_FMTYPE_MLC = 1, 137 NVM_ID_FMTYPE_MLC = 1,
138
139 /* Device capabilities */
140 NVM_ID_DCAP_BBLKMGMT = 0x1,
141 NVM_UD_DCAP_ECC = 0x2,
138}; 142};
139 143
140struct nvm_id_lp_mlc { 144struct nvm_id_lp_mlc {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 430a929f048b..a0e8cc8dcc67 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -44,6 +44,8 @@
44 44
45#include <linux/timecounter.h> 45#include <linux/timecounter.h>
46 46
47#define DEFAULT_UAR_PAGE_SHIFT 12
48
47#define MAX_MSIX_P_PORT 17 49#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 50#define MAX_MSIX 64
49#define MIN_MSIX_P_PORT 5 51#define MIN_MSIX_P_PORT 5
@@ -856,6 +858,7 @@ struct mlx4_dev {
856 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 858 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
857 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 859 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
858 struct mlx4_vf_dev *dev_vfs; 860 struct mlx4_vf_dev *dev_vfs;
861 u8 uar_page_shift;
859}; 862};
860 863
861struct mlx4_clock_params { 864struct mlx4_clock_params {
@@ -1528,4 +1531,14 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1528int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1531int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1529 struct mlx4_clock_params *params); 1532 struct mlx4_clock_params *params);
1530 1533
1534static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
1535{
1536 return (index << (PAGE_SHIFT - dev->uar_page_shift));
1537}
1538
1539static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
1540{
1541 /* The first 128 UARs are used for EQ doorbells */
1542 return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
1543}
1531#endif /* MLX4_DEVICE_H */ 1544#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 231ab6bcea76..51f1e540fc2b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -207,15 +207,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
207 u8 outer_dmac[0x1]; 207 u8 outer_dmac[0x1];
208 u8 outer_smac[0x1]; 208 u8 outer_smac[0x1];
209 u8 outer_ether_type[0x1]; 209 u8 outer_ether_type[0x1];
210 u8 reserved_0[0x1]; 210 u8 reserved_at_3[0x1];
211 u8 outer_first_prio[0x1]; 211 u8 outer_first_prio[0x1];
212 u8 outer_first_cfi[0x1]; 212 u8 outer_first_cfi[0x1];
213 u8 outer_first_vid[0x1]; 213 u8 outer_first_vid[0x1];
214 u8 reserved_1[0x1]; 214 u8 reserved_at_7[0x1];
215 u8 outer_second_prio[0x1]; 215 u8 outer_second_prio[0x1];
216 u8 outer_second_cfi[0x1]; 216 u8 outer_second_cfi[0x1];
217 u8 outer_second_vid[0x1]; 217 u8 outer_second_vid[0x1];
218 u8 reserved_2[0x1]; 218 u8 reserved_at_b[0x1];
219 u8 outer_sip[0x1]; 219 u8 outer_sip[0x1];
220 u8 outer_dip[0x1]; 220 u8 outer_dip[0x1];
221 u8 outer_frag[0x1]; 221 u8 outer_frag[0x1];
@@ -230,21 +230,21 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
230 u8 outer_gre_protocol[0x1]; 230 u8 outer_gre_protocol[0x1];
231 u8 outer_gre_key[0x1]; 231 u8 outer_gre_key[0x1];
232 u8 outer_vxlan_vni[0x1]; 232 u8 outer_vxlan_vni[0x1];
233 u8 reserved_3[0x5]; 233 u8 reserved_at_1a[0x5];
234 u8 source_eswitch_port[0x1]; 234 u8 source_eswitch_port[0x1];
235 235
236 u8 inner_dmac[0x1]; 236 u8 inner_dmac[0x1];
237 u8 inner_smac[0x1]; 237 u8 inner_smac[0x1];
238 u8 inner_ether_type[0x1]; 238 u8 inner_ether_type[0x1];
239 u8 reserved_4[0x1]; 239 u8 reserved_at_23[0x1];
240 u8 inner_first_prio[0x1]; 240 u8 inner_first_prio[0x1];
241 u8 inner_first_cfi[0x1]; 241 u8 inner_first_cfi[0x1];
242 u8 inner_first_vid[0x1]; 242 u8 inner_first_vid[0x1];
243 u8 reserved_5[0x1]; 243 u8 reserved_at_27[0x1];
244 u8 inner_second_prio[0x1]; 244 u8 inner_second_prio[0x1];
245 u8 inner_second_cfi[0x1]; 245 u8 inner_second_cfi[0x1];
246 u8 inner_second_vid[0x1]; 246 u8 inner_second_vid[0x1];
247 u8 reserved_6[0x1]; 247 u8 reserved_at_2b[0x1];
248 u8 inner_sip[0x1]; 248 u8 inner_sip[0x1];
249 u8 inner_dip[0x1]; 249 u8 inner_dip[0x1];
250 u8 inner_frag[0x1]; 250 u8 inner_frag[0x1];
@@ -256,37 +256,37 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
256 u8 inner_tcp_sport[0x1]; 256 u8 inner_tcp_sport[0x1];
257 u8 inner_tcp_dport[0x1]; 257 u8 inner_tcp_dport[0x1];
258 u8 inner_tcp_flags[0x1]; 258 u8 inner_tcp_flags[0x1];
259 u8 reserved_7[0x9]; 259 u8 reserved_at_37[0x9];
260 260
261 u8 reserved_8[0x40]; 261 u8 reserved_at_40[0x40];
262}; 262};
263 263
264struct mlx5_ifc_flow_table_prop_layout_bits { 264struct mlx5_ifc_flow_table_prop_layout_bits {
265 u8 ft_support[0x1]; 265 u8 ft_support[0x1];
266 u8 reserved_0[0x2]; 266 u8 reserved_at_1[0x2];
267 u8 flow_modify_en[0x1]; 267 u8 flow_modify_en[0x1];
268 u8 modify_root[0x1]; 268 u8 modify_root[0x1];
269 u8 identified_miss_table_mode[0x1]; 269 u8 identified_miss_table_mode[0x1];
270 u8 flow_table_modify[0x1]; 270 u8 flow_table_modify[0x1];
271 u8 reserved_1[0x19]; 271 u8 reserved_at_7[0x19];
272 272
273 u8 reserved_2[0x2]; 273 u8 reserved_at_20[0x2];
274 u8 log_max_ft_size[0x6]; 274 u8 log_max_ft_size[0x6];
275 u8 reserved_3[0x10]; 275 u8 reserved_at_28[0x10];
276 u8 max_ft_level[0x8]; 276 u8 max_ft_level[0x8];
277 277
278 u8 reserved_4[0x20]; 278 u8 reserved_at_40[0x20];
279 279
280 u8 reserved_5[0x18]; 280 u8 reserved_at_60[0x18];
281 u8 log_max_ft_num[0x8]; 281 u8 log_max_ft_num[0x8];
282 282
283 u8 reserved_6[0x18]; 283 u8 reserved_at_80[0x18];
284 u8 log_max_destination[0x8]; 284 u8 log_max_destination[0x8];
285 285
286 u8 reserved_7[0x18]; 286 u8 reserved_at_a0[0x18];
287 u8 log_max_flow[0x8]; 287 u8 log_max_flow[0x8];
288 288
289 u8 reserved_8[0x40]; 289 u8 reserved_at_c0[0x40];
290 290
291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; 291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
292 292
@@ -298,13 +298,13 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
298 u8 receive[0x1]; 298 u8 receive[0x1];
299 u8 write[0x1]; 299 u8 write[0x1];
300 u8 read[0x1]; 300 u8 read[0x1];
301 u8 reserved_0[0x1]; 301 u8 reserved_at_4[0x1];
302 u8 srq_receive[0x1]; 302 u8 srq_receive[0x1];
303 u8 reserved_1[0x1a]; 303 u8 reserved_at_6[0x1a];
304}; 304};
305 305
306struct mlx5_ifc_ipv4_layout_bits { 306struct mlx5_ifc_ipv4_layout_bits {
307 u8 reserved_0[0x60]; 307 u8 reserved_at_0[0x60];
308 308
309 u8 ipv4[0x20]; 309 u8 ipv4[0x20];
310}; 310};
@@ -316,7 +316,7 @@ struct mlx5_ifc_ipv6_layout_bits {
316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
319 u8 reserved_0[0x80]; 319 u8 reserved_at_0[0x80];
320}; 320};
321 321
322struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 322struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
@@ -336,15 +336,15 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
336 u8 ip_dscp[0x6]; 336 u8 ip_dscp[0x6];
337 u8 ip_ecn[0x2]; 337 u8 ip_ecn[0x2];
338 u8 vlan_tag[0x1]; 338 u8 vlan_tag[0x1];
339 u8 reserved_0[0x1]; 339 u8 reserved_at_91[0x1];
340 u8 frag[0x1]; 340 u8 frag[0x1];
341 u8 reserved_1[0x4]; 341 u8 reserved_at_93[0x4];
342 u8 tcp_flags[0x9]; 342 u8 tcp_flags[0x9];
343 343
344 u8 tcp_sport[0x10]; 344 u8 tcp_sport[0x10];
345 u8 tcp_dport[0x10]; 345 u8 tcp_dport[0x10];
346 346
347 u8 reserved_2[0x20]; 347 u8 reserved_at_c0[0x20];
348 348
349 u8 udp_sport[0x10]; 349 u8 udp_sport[0x10];
350 u8 udp_dport[0x10]; 350 u8 udp_dport[0x10];
@@ -355,9 +355,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
355}; 355};
356 356
357struct mlx5_ifc_fte_match_set_misc_bits { 357struct mlx5_ifc_fte_match_set_misc_bits {
358 u8 reserved_0[0x20]; 358 u8 reserved_at_0[0x20];
359 359
360 u8 reserved_1[0x10]; 360 u8 reserved_at_20[0x10];
361 u8 source_port[0x10]; 361 u8 source_port[0x10];
362 362
363 u8 outer_second_prio[0x3]; 363 u8 outer_second_prio[0x3];
@@ -369,31 +369,31 @@ struct mlx5_ifc_fte_match_set_misc_bits {
369 369
370 u8 outer_second_vlan_tag[0x1]; 370 u8 outer_second_vlan_tag[0x1];
371 u8 inner_second_vlan_tag[0x1]; 371 u8 inner_second_vlan_tag[0x1];
372 u8 reserved_2[0xe]; 372 u8 reserved_at_62[0xe];
373 u8 gre_protocol[0x10]; 373 u8 gre_protocol[0x10];
374 374
375 u8 gre_key_h[0x18]; 375 u8 gre_key_h[0x18];
376 u8 gre_key_l[0x8]; 376 u8 gre_key_l[0x8];
377 377
378 u8 vxlan_vni[0x18]; 378 u8 vxlan_vni[0x18];
379 u8 reserved_3[0x8]; 379 u8 reserved_at_b8[0x8];
380 380
381 u8 reserved_4[0x20]; 381 u8 reserved_at_c0[0x20];
382 382
383 u8 reserved_5[0xc]; 383 u8 reserved_at_e0[0xc];
384 u8 outer_ipv6_flow_label[0x14]; 384 u8 outer_ipv6_flow_label[0x14];
385 385
386 u8 reserved_6[0xc]; 386 u8 reserved_at_100[0xc];
387 u8 inner_ipv6_flow_label[0x14]; 387 u8 inner_ipv6_flow_label[0x14];
388 388
389 u8 reserved_7[0xe0]; 389 u8 reserved_at_120[0xe0];
390}; 390};
391 391
392struct mlx5_ifc_cmd_pas_bits { 392struct mlx5_ifc_cmd_pas_bits {
393 u8 pa_h[0x20]; 393 u8 pa_h[0x20];
394 394
395 u8 pa_l[0x14]; 395 u8 pa_l[0x14];
396 u8 reserved_0[0xc]; 396 u8 reserved_at_34[0xc];
397}; 397};
398 398
399struct mlx5_ifc_uint64_bits { 399struct mlx5_ifc_uint64_bits {
@@ -418,31 +418,31 @@ enum {
418struct mlx5_ifc_ads_bits { 418struct mlx5_ifc_ads_bits {
419 u8 fl[0x1]; 419 u8 fl[0x1];
420 u8 free_ar[0x1]; 420 u8 free_ar[0x1];
421 u8 reserved_0[0xe]; 421 u8 reserved_at_2[0xe];
422 u8 pkey_index[0x10]; 422 u8 pkey_index[0x10];
423 423
424 u8 reserved_1[0x8]; 424 u8 reserved_at_20[0x8];
425 u8 grh[0x1]; 425 u8 grh[0x1];
426 u8 mlid[0x7]; 426 u8 mlid[0x7];
427 u8 rlid[0x10]; 427 u8 rlid[0x10];
428 428
429 u8 ack_timeout[0x5]; 429 u8 ack_timeout[0x5];
430 u8 reserved_2[0x3]; 430 u8 reserved_at_45[0x3];
431 u8 src_addr_index[0x8]; 431 u8 src_addr_index[0x8];
432 u8 reserved_3[0x4]; 432 u8 reserved_at_50[0x4];
433 u8 stat_rate[0x4]; 433 u8 stat_rate[0x4];
434 u8 hop_limit[0x8]; 434 u8 hop_limit[0x8];
435 435
436 u8 reserved_4[0x4]; 436 u8 reserved_at_60[0x4];
437 u8 tclass[0x8]; 437 u8 tclass[0x8];
438 u8 flow_label[0x14]; 438 u8 flow_label[0x14];
439 439
440 u8 rgid_rip[16][0x8]; 440 u8 rgid_rip[16][0x8];
441 441
442 u8 reserved_5[0x4]; 442 u8 reserved_at_100[0x4];
443 u8 f_dscp[0x1]; 443 u8 f_dscp[0x1];
444 u8 f_ecn[0x1]; 444 u8 f_ecn[0x1];
445 u8 reserved_6[0x1]; 445 u8 reserved_at_106[0x1];
446 u8 f_eth_prio[0x1]; 446 u8 f_eth_prio[0x1];
447 u8 ecn[0x2]; 447 u8 ecn[0x2];
448 u8 dscp[0x6]; 448 u8 dscp[0x6];
@@ -458,25 +458,25 @@ struct mlx5_ifc_ads_bits {
458}; 458};
459 459
460struct mlx5_ifc_flow_table_nic_cap_bits { 460struct mlx5_ifc_flow_table_nic_cap_bits {
461 u8 reserved_0[0x200]; 461 u8 reserved_at_0[0x200];
462 462
463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
464 464
465 u8 reserved_1[0x200]; 465 u8 reserved_at_400[0x200];
466 466
467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; 467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
468 468
469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; 469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
470 470
471 u8 reserved_2[0x200]; 471 u8 reserved_at_a00[0x200];
472 472
473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; 473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
474 474
475 u8 reserved_3[0x7200]; 475 u8 reserved_at_e00[0x7200];
476}; 476};
477 477
478struct mlx5_ifc_flow_table_eswitch_cap_bits { 478struct mlx5_ifc_flow_table_eswitch_cap_bits {
479 u8 reserved_0[0x200]; 479 u8 reserved_at_0[0x200];
480 480
481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; 481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
482 482
@@ -484,7 +484,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
484 484
485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; 485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
486 486
487 u8 reserved_1[0x7800]; 487 u8 reserved_at_800[0x7800];
488}; 488};
489 489
490struct mlx5_ifc_e_switch_cap_bits { 490struct mlx5_ifc_e_switch_cap_bits {
@@ -493,9 +493,9 @@ struct mlx5_ifc_e_switch_cap_bits {
493 u8 vport_svlan_insert[0x1]; 493 u8 vport_svlan_insert[0x1];
494 u8 vport_cvlan_insert_if_not_exist[0x1]; 494 u8 vport_cvlan_insert_if_not_exist[0x1];
495 u8 vport_cvlan_insert_overwrite[0x1]; 495 u8 vport_cvlan_insert_overwrite[0x1];
496 u8 reserved_0[0x1b]; 496 u8 reserved_at_5[0x1b];
497 497
498 u8 reserved_1[0x7e0]; 498 u8 reserved_at_20[0x7e0];
499}; 499};
500 500
501struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 501struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -504,51 +504,51 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
504 u8 lro_cap[0x1]; 504 u8 lro_cap[0x1];
505 u8 lro_psh_flag[0x1]; 505 u8 lro_psh_flag[0x1];
506 u8 lro_time_stamp[0x1]; 506 u8 lro_time_stamp[0x1];
507 u8 reserved_0[0x3]; 507 u8 reserved_at_5[0x3];
508 u8 self_lb_en_modifiable[0x1]; 508 u8 self_lb_en_modifiable[0x1];
509 u8 reserved_1[0x2]; 509 u8 reserved_at_9[0x2];
510 u8 max_lso_cap[0x5]; 510 u8 max_lso_cap[0x5];
511 u8 reserved_2[0x4]; 511 u8 reserved_at_10[0x4];
512 u8 rss_ind_tbl_cap[0x4]; 512 u8 rss_ind_tbl_cap[0x4];
513 u8 reserved_3[0x3]; 513 u8 reserved_at_18[0x3];
514 u8 tunnel_lso_const_out_ip_id[0x1]; 514 u8 tunnel_lso_const_out_ip_id[0x1];
515 u8 reserved_4[0x2]; 515 u8 reserved_at_1c[0x2];
516 u8 tunnel_statless_gre[0x1]; 516 u8 tunnel_statless_gre[0x1];
517 u8 tunnel_stateless_vxlan[0x1]; 517 u8 tunnel_stateless_vxlan[0x1];
518 518
519 u8 reserved_5[0x20]; 519 u8 reserved_at_20[0x20];
520 520
521 u8 reserved_6[0x10]; 521 u8 reserved_at_40[0x10];
522 u8 lro_min_mss_size[0x10]; 522 u8 lro_min_mss_size[0x10];
523 523
524 u8 reserved_7[0x120]; 524 u8 reserved_at_60[0x120];
525 525
526 u8 lro_timer_supported_periods[4][0x20]; 526 u8 lro_timer_supported_periods[4][0x20];
527 527
528 u8 reserved_8[0x600]; 528 u8 reserved_at_200[0x600];
529}; 529};
530 530
531struct mlx5_ifc_roce_cap_bits { 531struct mlx5_ifc_roce_cap_bits {
532 u8 roce_apm[0x1]; 532 u8 roce_apm[0x1];
533 u8 reserved_0[0x1f]; 533 u8 reserved_at_1[0x1f];
534 534
535 u8 reserved_1[0x60]; 535 u8 reserved_at_20[0x60];
536 536
537 u8 reserved_2[0xc]; 537 u8 reserved_at_80[0xc];
538 u8 l3_type[0x4]; 538 u8 l3_type[0x4];
539 u8 reserved_3[0x8]; 539 u8 reserved_at_90[0x8];
540 u8 roce_version[0x8]; 540 u8 roce_version[0x8];
541 541
542 u8 reserved_4[0x10]; 542 u8 reserved_at_a0[0x10];
543 u8 r_roce_dest_udp_port[0x10]; 543 u8 r_roce_dest_udp_port[0x10];
544 544
545 u8 r_roce_max_src_udp_port[0x10]; 545 u8 r_roce_max_src_udp_port[0x10];
546 u8 r_roce_min_src_udp_port[0x10]; 546 u8 r_roce_min_src_udp_port[0x10];
547 547
548 u8 reserved_5[0x10]; 548 u8 reserved_at_e0[0x10];
549 u8 roce_address_table_size[0x10]; 549 u8 roce_address_table_size[0x10];
550 550
551 u8 reserved_6[0x700]; 551 u8 reserved_at_100[0x700];
552}; 552};
553 553
554enum { 554enum {
@@ -576,35 +576,35 @@ enum {
576}; 576};
577 577
578struct mlx5_ifc_atomic_caps_bits { 578struct mlx5_ifc_atomic_caps_bits {
579 u8 reserved_0[0x40]; 579 u8 reserved_at_0[0x40];
580 580
581 u8 atomic_req_8B_endianess_mode[0x2]; 581 u8 atomic_req_8B_endianess_mode[0x2];
582 u8 reserved_1[0x4]; 582 u8 reserved_at_42[0x4];
583 u8 supported_atomic_req_8B_endianess_mode_1[0x1]; 583 u8 supported_atomic_req_8B_endianess_mode_1[0x1];
584 584
585 u8 reserved_2[0x19]; 585 u8 reserved_at_47[0x19];
586 586
587 u8 reserved_3[0x20]; 587 u8 reserved_at_60[0x20];
588 588
589 u8 reserved_4[0x10]; 589 u8 reserved_at_80[0x10];
590 u8 atomic_operations[0x10]; 590 u8 atomic_operations[0x10];
591 591
592 u8 reserved_5[0x10]; 592 u8 reserved_at_a0[0x10];
593 u8 atomic_size_qp[0x10]; 593 u8 atomic_size_qp[0x10];
594 594
595 u8 reserved_6[0x10]; 595 u8 reserved_at_c0[0x10];
596 u8 atomic_size_dc[0x10]; 596 u8 atomic_size_dc[0x10];
597 597
598 u8 reserved_7[0x720]; 598 u8 reserved_at_e0[0x720];
599}; 599};
600 600
601struct mlx5_ifc_odp_cap_bits { 601struct mlx5_ifc_odp_cap_bits {
602 u8 reserved_0[0x40]; 602 u8 reserved_at_0[0x40];
603 603
604 u8 sig[0x1]; 604 u8 sig[0x1];
605 u8 reserved_1[0x1f]; 605 u8 reserved_at_41[0x1f];
606 606
607 u8 reserved_2[0x20]; 607 u8 reserved_at_60[0x20];
608 608
609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; 609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
610 610
@@ -612,7 +612,7 @@ struct mlx5_ifc_odp_cap_bits {
612 612
613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; 613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
614 614
615 u8 reserved_3[0x720]; 615 u8 reserved_at_e0[0x720];
616}; 616};
617 617
618enum { 618enum {
@@ -660,55 +660,55 @@ enum {
660}; 660};
661 661
662struct mlx5_ifc_cmd_hca_cap_bits { 662struct mlx5_ifc_cmd_hca_cap_bits {
663 u8 reserved_0[0x80]; 663 u8 reserved_at_0[0x80];
664 664
665 u8 log_max_srq_sz[0x8]; 665 u8 log_max_srq_sz[0x8];
666 u8 log_max_qp_sz[0x8]; 666 u8 log_max_qp_sz[0x8];
667 u8 reserved_1[0xb]; 667 u8 reserved_at_90[0xb];
668 u8 log_max_qp[0x5]; 668 u8 log_max_qp[0x5];
669 669
670 u8 reserved_2[0xb]; 670 u8 reserved_at_a0[0xb];
671 u8 log_max_srq[0x5]; 671 u8 log_max_srq[0x5];
672 u8 reserved_3[0x10]; 672 u8 reserved_at_b0[0x10];
673 673
674 u8 reserved_4[0x8]; 674 u8 reserved_at_c0[0x8];
675 u8 log_max_cq_sz[0x8]; 675 u8 log_max_cq_sz[0x8];
676 u8 reserved_5[0xb]; 676 u8 reserved_at_d0[0xb];
677 u8 log_max_cq[0x5]; 677 u8 log_max_cq[0x5];
678 678
679 u8 log_max_eq_sz[0x8]; 679 u8 log_max_eq_sz[0x8];
680 u8 reserved_6[0x2]; 680 u8 reserved_at_e8[0x2];
681 u8 log_max_mkey[0x6]; 681 u8 log_max_mkey[0x6];
682 u8 reserved_7[0xc]; 682 u8 reserved_at_f0[0xc];
683 u8 log_max_eq[0x4]; 683 u8 log_max_eq[0x4];
684 684
685 u8 max_indirection[0x8]; 685 u8 max_indirection[0x8];
686 u8 reserved_8[0x1]; 686 u8 reserved_at_108[0x1];
687 u8 log_max_mrw_sz[0x7]; 687 u8 log_max_mrw_sz[0x7];
688 u8 reserved_9[0x2]; 688 u8 reserved_at_110[0x2];
689 u8 log_max_bsf_list_size[0x6]; 689 u8 log_max_bsf_list_size[0x6];
690 u8 reserved_10[0x2]; 690 u8 reserved_at_118[0x2];
691 u8 log_max_klm_list_size[0x6]; 691 u8 log_max_klm_list_size[0x6];
692 692
693 u8 reserved_11[0xa]; 693 u8 reserved_at_120[0xa];
694 u8 log_max_ra_req_dc[0x6]; 694 u8 log_max_ra_req_dc[0x6];
695 u8 reserved_12[0xa]; 695 u8 reserved_at_130[0xa];
696 u8 log_max_ra_res_dc[0x6]; 696 u8 log_max_ra_res_dc[0x6];
697 697
698 u8 reserved_13[0xa]; 698 u8 reserved_at_140[0xa];
699 u8 log_max_ra_req_qp[0x6]; 699 u8 log_max_ra_req_qp[0x6];
700 u8 reserved_14[0xa]; 700 u8 reserved_at_150[0xa];
701 u8 log_max_ra_res_qp[0x6]; 701 u8 log_max_ra_res_qp[0x6];
702 702
703 u8 pad_cap[0x1]; 703 u8 pad_cap[0x1];
704 u8 cc_query_allowed[0x1]; 704 u8 cc_query_allowed[0x1];
705 u8 cc_modify_allowed[0x1]; 705 u8 cc_modify_allowed[0x1];
706 u8 reserved_15[0xd]; 706 u8 reserved_at_163[0xd];
707 u8 gid_table_size[0x10]; 707 u8 gid_table_size[0x10];
708 708
709 u8 out_of_seq_cnt[0x1]; 709 u8 out_of_seq_cnt[0x1];
710 u8 vport_counters[0x1]; 710 u8 vport_counters[0x1];
711 u8 reserved_16[0x4]; 711 u8 reserved_at_182[0x4];
712 u8 max_qp_cnt[0xa]; 712 u8 max_qp_cnt[0xa];
713 u8 pkey_table_size[0x10]; 713 u8 pkey_table_size[0x10];
714 714
@@ -716,158 +716,158 @@ struct mlx5_ifc_cmd_hca_cap_bits {
716 u8 vhca_group_manager[0x1]; 716 u8 vhca_group_manager[0x1];
717 u8 ib_virt[0x1]; 717 u8 ib_virt[0x1];
718 u8 eth_virt[0x1]; 718 u8 eth_virt[0x1];
719 u8 reserved_17[0x1]; 719 u8 reserved_at_1a4[0x1];
720 u8 ets[0x1]; 720 u8 ets[0x1];
721 u8 nic_flow_table[0x1]; 721 u8 nic_flow_table[0x1];
722 u8 eswitch_flow_table[0x1]; 722 u8 eswitch_flow_table[0x1];
723 u8 early_vf_enable; 723 u8 early_vf_enable;
724 u8 reserved_18[0x2]; 724 u8 reserved_at_1a8[0x2];
725 u8 local_ca_ack_delay[0x5]; 725 u8 local_ca_ack_delay[0x5];
726 u8 reserved_19[0x6]; 726 u8 reserved_at_1af[0x6];
727 u8 port_type[0x2]; 727 u8 port_type[0x2];
728 u8 num_ports[0x8]; 728 u8 num_ports[0x8];
729 729
730 u8 reserved_20[0x3]; 730 u8 reserved_at_1bf[0x3];
731 u8 log_max_msg[0x5]; 731 u8 log_max_msg[0x5];
732 u8 reserved_21[0x18]; 732 u8 reserved_at_1c7[0x18];
733 733
734 u8 stat_rate_support[0x10]; 734 u8 stat_rate_support[0x10];
735 u8 reserved_22[0xc]; 735 u8 reserved_at_1ef[0xc];
736 u8 cqe_version[0x4]; 736 u8 cqe_version[0x4];
737 737
738 u8 compact_address_vector[0x1]; 738 u8 compact_address_vector[0x1];
739 u8 reserved_23[0xe]; 739 u8 reserved_at_200[0xe];
740 u8 drain_sigerr[0x1]; 740 u8 drain_sigerr[0x1];
741 u8 cmdif_checksum[0x2]; 741 u8 cmdif_checksum[0x2];
742 u8 sigerr_cqe[0x1]; 742 u8 sigerr_cqe[0x1];
743 u8 reserved_24[0x1]; 743 u8 reserved_at_212[0x1];
744 u8 wq_signature[0x1]; 744 u8 wq_signature[0x1];
745 u8 sctr_data_cqe[0x1]; 745 u8 sctr_data_cqe[0x1];
746 u8 reserved_25[0x1]; 746 u8 reserved_at_215[0x1];
747 u8 sho[0x1]; 747 u8 sho[0x1];
748 u8 tph[0x1]; 748 u8 tph[0x1];
749 u8 rf[0x1]; 749 u8 rf[0x1];
750 u8 dct[0x1]; 750 u8 dct[0x1];
751 u8 reserved_26[0x1]; 751 u8 reserved_at_21a[0x1];
752 u8 eth_net_offloads[0x1]; 752 u8 eth_net_offloads[0x1];
753 u8 roce[0x1]; 753 u8 roce[0x1];
754 u8 atomic[0x1]; 754 u8 atomic[0x1];
755 u8 reserved_27[0x1]; 755 u8 reserved_at_21e[0x1];
756 756
757 u8 cq_oi[0x1]; 757 u8 cq_oi[0x1];
758 u8 cq_resize[0x1]; 758 u8 cq_resize[0x1];
759 u8 cq_moderation[0x1]; 759 u8 cq_moderation[0x1];
760 u8 reserved_28[0x3]; 760 u8 reserved_at_222[0x3];
761 u8 cq_eq_remap[0x1]; 761 u8 cq_eq_remap[0x1];
762 u8 pg[0x1]; 762 u8 pg[0x1];
763 u8 block_lb_mc[0x1]; 763 u8 block_lb_mc[0x1];
764 u8 reserved_29[0x1]; 764 u8 reserved_at_228[0x1];
765 u8 scqe_break_moderation[0x1]; 765 u8 scqe_break_moderation[0x1];
766 u8 reserved_30[0x1]; 766 u8 reserved_at_22a[0x1];
767 u8 cd[0x1]; 767 u8 cd[0x1];
768 u8 reserved_31[0x1]; 768 u8 reserved_at_22c[0x1];
769 u8 apm[0x1]; 769 u8 apm[0x1];
770 u8 reserved_32[0x7]; 770 u8 reserved_at_22e[0x7];
771 u8 qkv[0x1]; 771 u8 qkv[0x1];
772 u8 pkv[0x1]; 772 u8 pkv[0x1];
773 u8 reserved_33[0x4]; 773 u8 reserved_at_237[0x4];
774 u8 xrc[0x1]; 774 u8 xrc[0x1];
775 u8 ud[0x1]; 775 u8 ud[0x1];
776 u8 uc[0x1]; 776 u8 uc[0x1];
777 u8 rc[0x1]; 777 u8 rc[0x1];
778 778
779 u8 reserved_34[0xa]; 779 u8 reserved_at_23f[0xa];
780 u8 uar_sz[0x6]; 780 u8 uar_sz[0x6];
781 u8 reserved_35[0x8]; 781 u8 reserved_at_24f[0x8];
782 u8 log_pg_sz[0x8]; 782 u8 log_pg_sz[0x8];
783 783
784 u8 bf[0x1]; 784 u8 bf[0x1];
785 u8 reserved_36[0x1]; 785 u8 reserved_at_260[0x1];
786 u8 pad_tx_eth_packet[0x1]; 786 u8 pad_tx_eth_packet[0x1];
787 u8 reserved_37[0x8]; 787 u8 reserved_at_262[0x8];
788 u8 log_bf_reg_size[0x5]; 788 u8 log_bf_reg_size[0x5];
789 u8 reserved_38[0x10]; 789 u8 reserved_at_26f[0x10];
790 790
791 u8 reserved_39[0x10]; 791 u8 reserved_at_27f[0x10];
792 u8 max_wqe_sz_sq[0x10]; 792 u8 max_wqe_sz_sq[0x10];
793 793
794 u8 reserved_40[0x10]; 794 u8 reserved_at_29f[0x10];
795 u8 max_wqe_sz_rq[0x10]; 795 u8 max_wqe_sz_rq[0x10];
796 796
797 u8 reserved_41[0x10]; 797 u8 reserved_at_2bf[0x10];
798 u8 max_wqe_sz_sq_dc[0x10]; 798 u8 max_wqe_sz_sq_dc[0x10];
799 799
800 u8 reserved_42[0x7]; 800 u8 reserved_at_2df[0x7];
801 u8 max_qp_mcg[0x19]; 801 u8 max_qp_mcg[0x19];
802 802
803 u8 reserved_43[0x18]; 803 u8 reserved_at_2ff[0x18];
804 u8 log_max_mcg[0x8]; 804 u8 log_max_mcg[0x8];
805 805
806 u8 reserved_44[0x3]; 806 u8 reserved_at_31f[0x3];
807 u8 log_max_transport_domain[0x5]; 807 u8 log_max_transport_domain[0x5];
808 u8 reserved_45[0x3]; 808 u8 reserved_at_327[0x3];
809 u8 log_max_pd[0x5]; 809 u8 log_max_pd[0x5];
810 u8 reserved_46[0xb]; 810 u8 reserved_at_32f[0xb];
811 u8 log_max_xrcd[0x5]; 811 u8 log_max_xrcd[0x5];
812 812
813 u8 reserved_47[0x20]; 813 u8 reserved_at_33f[0x20];
814 814
815 u8 reserved_48[0x3]; 815 u8 reserved_at_35f[0x3];
816 u8 log_max_rq[0x5]; 816 u8 log_max_rq[0x5];
817 u8 reserved_49[0x3]; 817 u8 reserved_at_367[0x3];
818 u8 log_max_sq[0x5]; 818 u8 log_max_sq[0x5];
819 u8 reserved_50[0x3]; 819 u8 reserved_at_36f[0x3];
820 u8 log_max_tir[0x5]; 820 u8 log_max_tir[0x5];
821 u8 reserved_51[0x3]; 821 u8 reserved_at_377[0x3];
822 u8 log_max_tis[0x5]; 822 u8 log_max_tis[0x5];
823 823
824 u8 basic_cyclic_rcv_wqe[0x1]; 824 u8 basic_cyclic_rcv_wqe[0x1];
825 u8 reserved_52[0x2]; 825 u8 reserved_at_380[0x2];
826 u8 log_max_rmp[0x5]; 826 u8 log_max_rmp[0x5];
827 u8 reserved_53[0x3]; 827 u8 reserved_at_387[0x3];
828 u8 log_max_rqt[0x5]; 828 u8 log_max_rqt[0x5];
829 u8 reserved_54[0x3]; 829 u8 reserved_at_38f[0x3];
830 u8 log_max_rqt_size[0x5]; 830 u8 log_max_rqt_size[0x5];
831 u8 reserved_55[0x3]; 831 u8 reserved_at_397[0x3];
832 u8 log_max_tis_per_sq[0x5]; 832 u8 log_max_tis_per_sq[0x5];
833 833
834 u8 reserved_56[0x3]; 834 u8 reserved_at_39f[0x3];
835 u8 log_max_stride_sz_rq[0x5]; 835 u8 log_max_stride_sz_rq[0x5];
836 u8 reserved_57[0x3]; 836 u8 reserved_at_3a7[0x3];
837 u8 log_min_stride_sz_rq[0x5]; 837 u8 log_min_stride_sz_rq[0x5];
838 u8 reserved_58[0x3]; 838 u8 reserved_at_3af[0x3];
839 u8 log_max_stride_sz_sq[0x5]; 839 u8 log_max_stride_sz_sq[0x5];
840 u8 reserved_59[0x3]; 840 u8 reserved_at_3b7[0x3];
841 u8 log_min_stride_sz_sq[0x5]; 841 u8 log_min_stride_sz_sq[0x5];
842 842
843 u8 reserved_60[0x1b]; 843 u8 reserved_at_3bf[0x1b];
844 u8 log_max_wq_sz[0x5]; 844 u8 log_max_wq_sz[0x5];
845 845
846 u8 nic_vport_change_event[0x1]; 846 u8 nic_vport_change_event[0x1];
847 u8 reserved_61[0xa]; 847 u8 reserved_at_3e0[0xa];
848 u8 log_max_vlan_list[0x5]; 848 u8 log_max_vlan_list[0x5];
849 u8 reserved_62[0x3]; 849 u8 reserved_at_3ef[0x3];
850 u8 log_max_current_mc_list[0x5]; 850 u8 log_max_current_mc_list[0x5];
851 u8 reserved_63[0x3]; 851 u8 reserved_at_3f7[0x3];
852 u8 log_max_current_uc_list[0x5]; 852 u8 log_max_current_uc_list[0x5];
853 853
854 u8 reserved_64[0x80]; 854 u8 reserved_at_3ff[0x80];
855 855
856 u8 reserved_65[0x3]; 856 u8 reserved_at_47f[0x3];
857 u8 log_max_l2_table[0x5]; 857 u8 log_max_l2_table[0x5];
858 u8 reserved_66[0x8]; 858 u8 reserved_at_487[0x8];
859 u8 log_uar_page_sz[0x10]; 859 u8 log_uar_page_sz[0x10];
860 860
861 u8 reserved_67[0x20]; 861 u8 reserved_at_49f[0x20];
862 u8 device_frequency_mhz[0x20]; 862 u8 device_frequency_mhz[0x20];
863 u8 device_frequency_khz[0x20]; 863 u8 device_frequency_khz[0x20];
864 u8 reserved_68[0x5f]; 864 u8 reserved_at_4ff[0x5f];
865 u8 cqe_zip[0x1]; 865 u8 cqe_zip[0x1];
866 866
867 u8 cqe_zip_timeout[0x10]; 867 u8 cqe_zip_timeout[0x10];
868 u8 cqe_zip_max_num[0x10]; 868 u8 cqe_zip_max_num[0x10];
869 869
870 u8 reserved_69[0x220]; 870 u8 reserved_at_57f[0x220];
871}; 871};
872 872
873enum mlx5_flow_destination_type { 873enum mlx5_flow_destination_type {
@@ -880,7 +880,7 @@ struct mlx5_ifc_dest_format_struct_bits {
880 u8 destination_type[0x8]; 880 u8 destination_type[0x8];
881 u8 destination_id[0x18]; 881 u8 destination_id[0x18];
882 882
883 u8 reserved_0[0x20]; 883 u8 reserved_at_20[0x20];
884}; 884};
885 885
886struct mlx5_ifc_fte_match_param_bits { 886struct mlx5_ifc_fte_match_param_bits {
@@ -890,7 +890,7 @@ struct mlx5_ifc_fte_match_param_bits {
890 890
891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; 891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
892 892
893 u8 reserved_0[0xa00]; 893 u8 reserved_at_600[0xa00];
894}; 894};
895 895
896enum { 896enum {
@@ -922,18 +922,18 @@ struct mlx5_ifc_wq_bits {
922 u8 wq_signature[0x1]; 922 u8 wq_signature[0x1];
923 u8 end_padding_mode[0x2]; 923 u8 end_padding_mode[0x2];
924 u8 cd_slave[0x1]; 924 u8 cd_slave[0x1];
925 u8 reserved_0[0x18]; 925 u8 reserved_at_8[0x18];
926 926
927 u8 hds_skip_first_sge[0x1]; 927 u8 hds_skip_first_sge[0x1];
928 u8 log2_hds_buf_size[0x3]; 928 u8 log2_hds_buf_size[0x3];
929 u8 reserved_1[0x7]; 929 u8 reserved_at_24[0x7];
930 u8 page_offset[0x5]; 930 u8 page_offset[0x5];
931 u8 lwm[0x10]; 931 u8 lwm[0x10];
932 932
933 u8 reserved_2[0x8]; 933 u8 reserved_at_40[0x8];
934 u8 pd[0x18]; 934 u8 pd[0x18];
935 935
936 u8 reserved_3[0x8]; 936 u8 reserved_at_60[0x8];
937 u8 uar_page[0x18]; 937 u8 uar_page[0x18];
938 938
939 u8 dbr_addr[0x40]; 939 u8 dbr_addr[0x40];
@@ -942,60 +942,60 @@ struct mlx5_ifc_wq_bits {
942 942
943 u8 sw_counter[0x20]; 943 u8 sw_counter[0x20];
944 944
945 u8 reserved_4[0xc]; 945 u8 reserved_at_100[0xc];
946 u8 log_wq_stride[0x4]; 946 u8 log_wq_stride[0x4];
947 u8 reserved_5[0x3]; 947 u8 reserved_at_110[0x3];
948 u8 log_wq_pg_sz[0x5]; 948 u8 log_wq_pg_sz[0x5];
949 u8 reserved_6[0x3]; 949 u8 reserved_at_118[0x3];
950 u8 log_wq_sz[0x5]; 950 u8 log_wq_sz[0x5];
951 951
952 u8 reserved_7[0x4e0]; 952 u8 reserved_at_120[0x4e0];
953 953
954 struct mlx5_ifc_cmd_pas_bits pas[0]; 954 struct mlx5_ifc_cmd_pas_bits pas[0];
955}; 955};
956 956
957struct mlx5_ifc_rq_num_bits { 957struct mlx5_ifc_rq_num_bits {
958 u8 reserved_0[0x8]; 958 u8 reserved_at_0[0x8];
959 u8 rq_num[0x18]; 959 u8 rq_num[0x18];
960}; 960};
961 961
962struct mlx5_ifc_mac_address_layout_bits { 962struct mlx5_ifc_mac_address_layout_bits {
963 u8 reserved_0[0x10]; 963 u8 reserved_at_0[0x10];
964 u8 mac_addr_47_32[0x10]; 964 u8 mac_addr_47_32[0x10];
965 965
966 u8 mac_addr_31_0[0x20]; 966 u8 mac_addr_31_0[0x20];
967}; 967};
968 968
969struct mlx5_ifc_vlan_layout_bits { 969struct mlx5_ifc_vlan_layout_bits {
970 u8 reserved_0[0x14]; 970 u8 reserved_at_0[0x14];
971 u8 vlan[0x0c]; 971 u8 vlan[0x0c];
972 972
973 u8 reserved_1[0x20]; 973 u8 reserved_at_20[0x20];
974}; 974};
975 975
976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { 976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
977 u8 reserved_0[0xa0]; 977 u8 reserved_at_0[0xa0];
978 978
979 u8 min_time_between_cnps[0x20]; 979 u8 min_time_between_cnps[0x20];
980 980
981 u8 reserved_1[0x12]; 981 u8 reserved_at_c0[0x12];
982 u8 cnp_dscp[0x6]; 982 u8 cnp_dscp[0x6];
983 u8 reserved_2[0x5]; 983 u8 reserved_at_d8[0x5];
984 u8 cnp_802p_prio[0x3]; 984 u8 cnp_802p_prio[0x3];
985 985
986 u8 reserved_3[0x720]; 986 u8 reserved_at_e0[0x720];
987}; 987};
988 988
989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { 989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
990 u8 reserved_0[0x60]; 990 u8 reserved_at_0[0x60];
991 991
992 u8 reserved_1[0x4]; 992 u8 reserved_at_60[0x4];
993 u8 clamp_tgt_rate[0x1]; 993 u8 clamp_tgt_rate[0x1];
994 u8 reserved_2[0x3]; 994 u8 reserved_at_65[0x3];
995 u8 clamp_tgt_rate_after_time_inc[0x1]; 995 u8 clamp_tgt_rate_after_time_inc[0x1];
996 u8 reserved_3[0x17]; 996 u8 reserved_at_69[0x17];
997 997
998 u8 reserved_4[0x20]; 998 u8 reserved_at_80[0x20];
999 999
1000 u8 rpg_time_reset[0x20]; 1000 u8 rpg_time_reset[0x20];
1001 1001
@@ -1015,7 +1015,7 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1015 1015
1016 u8 rpg_min_rate[0x20]; 1016 u8 rpg_min_rate[0x20];
1017 1017
1018 u8 reserved_5[0xe0]; 1018 u8 reserved_at_1c0[0xe0];
1019 1019
1020 u8 rate_to_set_on_first_cnp[0x20]; 1020 u8 rate_to_set_on_first_cnp[0x20];
1021 1021
@@ -1025,15 +1025,15 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1025 1025
1026 u8 rate_reduce_monitor_period[0x20]; 1026 u8 rate_reduce_monitor_period[0x20];
1027 1027
1028 u8 reserved_6[0x20]; 1028 u8 reserved_at_320[0x20];
1029 1029
1030 u8 initial_alpha_value[0x20]; 1030 u8 initial_alpha_value[0x20];
1031 1031
1032 u8 reserved_7[0x4a0]; 1032 u8 reserved_at_360[0x4a0];
1033}; 1033};
1034 1034
1035struct mlx5_ifc_cong_control_802_1qau_rp_bits { 1035struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1036 u8 reserved_0[0x80]; 1036 u8 reserved_at_0[0x80];
1037 1037
1038 u8 rppp_max_rps[0x20]; 1038 u8 rppp_max_rps[0x20];
1039 1039
@@ -1055,7 +1055,7 @@ struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1055 1055
1056 u8 rpg_min_rate[0x20]; 1056 u8 rpg_min_rate[0x20];
1057 1057
1058 u8 reserved_1[0x640]; 1058 u8 reserved_at_1c0[0x640];
1059}; 1059};
1060 1060
1061enum { 1061enum {
@@ -1205,7 +1205,7 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
1205 1205
1206 u8 successful_recovery_events[0x20]; 1206 u8 successful_recovery_events[0x20];
1207 1207
1208 u8 reserved_0[0x180]; 1208 u8 reserved_at_640[0x180];
1209}; 1209};
1210 1210
1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { 1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -1213,7 +1213,7 @@ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1213 1213
1214 u8 transmit_queue_low[0x20]; 1214 u8 transmit_queue_low[0x20];
1215 1215
1216 u8 reserved_0[0x780]; 1216 u8 reserved_at_40[0x780];
1217}; 1217};
1218 1218
1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { 1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1221,7 +1221,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1221 1221
1222 u8 rx_octets_low[0x20]; 1222 u8 rx_octets_low[0x20];
1223 1223
1224 u8 reserved_0[0xc0]; 1224 u8 reserved_at_40[0xc0];
1225 1225
1226 u8 rx_frames_high[0x20]; 1226 u8 rx_frames_high[0x20];
1227 1227
@@ -1231,7 +1231,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1231 1231
1232 u8 tx_octets_low[0x20]; 1232 u8 tx_octets_low[0x20];
1233 1233
1234 u8 reserved_1[0xc0]; 1234 u8 reserved_at_180[0xc0];
1235 1235
1236 u8 tx_frames_high[0x20]; 1236 u8 tx_frames_high[0x20];
1237 1237
@@ -1257,7 +1257,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1257 1257
1258 u8 rx_pause_transition_low[0x20]; 1258 u8 rx_pause_transition_low[0x20];
1259 1259
1260 u8 reserved_2[0x400]; 1260 u8 reserved_at_3c0[0x400];
1261}; 1261};
1262 1262
1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { 1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1265,7 +1265,7 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1265 1265
1266 u8 port_transmit_wait_low[0x20]; 1266 u8 port_transmit_wait_low[0x20];
1267 1267
1268 u8 reserved_0[0x780]; 1268 u8 reserved_at_40[0x780];
1269}; 1269};
1270 1270
1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { 1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1333,7 +1333,7 @@ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
1333 1333
1334 u8 dot3out_pause_frames_low[0x20]; 1334 u8 dot3out_pause_frames_low[0x20];
1335 1335
1336 u8 reserved_0[0x3c0]; 1336 u8 reserved_at_400[0x3c0];
1337}; 1337};
1338 1338
1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { 1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
@@ -1421,7 +1421,7 @@ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
1421 1421
1422 u8 ether_stats_pkts8192to10239octets_low[0x20]; 1422 u8 ether_stats_pkts8192to10239octets_low[0x20];
1423 1423
1424 u8 reserved_0[0x280]; 1424 u8 reserved_at_540[0x280];
1425}; 1425};
1426 1426
1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { 1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
@@ -1477,7 +1477,7 @@ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
1477 1477
1478 u8 if_out_broadcast_pkts_low[0x20]; 1478 u8 if_out_broadcast_pkts_low[0x20];
1479 1479
1480 u8 reserved_0[0x480]; 1480 u8 reserved_at_340[0x480];
1481}; 1481};
1482 1482
1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { 1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
@@ -1557,54 +1557,54 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1557 1557
1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; 1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
1559 1559
1560 u8 reserved_0[0x300]; 1560 u8 reserved_at_4c0[0x300];
1561}; 1561};
1562 1562
1563struct mlx5_ifc_cmd_inter_comp_event_bits { 1563struct mlx5_ifc_cmd_inter_comp_event_bits {
1564 u8 command_completion_vector[0x20]; 1564 u8 command_completion_vector[0x20];
1565 1565
1566 u8 reserved_0[0xc0]; 1566 u8 reserved_at_20[0xc0];
1567}; 1567};
1568 1568
1569struct mlx5_ifc_stall_vl_event_bits { 1569struct mlx5_ifc_stall_vl_event_bits {
1570 u8 reserved_0[0x18]; 1570 u8 reserved_at_0[0x18];
1571 u8 port_num[0x1]; 1571 u8 port_num[0x1];
1572 u8 reserved_1[0x3]; 1572 u8 reserved_at_19[0x3];
1573 u8 vl[0x4]; 1573 u8 vl[0x4];
1574 1574
1575 u8 reserved_2[0xa0]; 1575 u8 reserved_at_20[0xa0];
1576}; 1576};
1577 1577
1578struct mlx5_ifc_db_bf_congestion_event_bits { 1578struct mlx5_ifc_db_bf_congestion_event_bits {
1579 u8 event_subtype[0x8]; 1579 u8 event_subtype[0x8];
1580 u8 reserved_0[0x8]; 1580 u8 reserved_at_8[0x8];
1581 u8 congestion_level[0x8]; 1581 u8 congestion_level[0x8];
1582 u8 reserved_1[0x8]; 1582 u8 reserved_at_18[0x8];
1583 1583
1584 u8 reserved_2[0xa0]; 1584 u8 reserved_at_20[0xa0];
1585}; 1585};
1586 1586
1587struct mlx5_ifc_gpio_event_bits { 1587struct mlx5_ifc_gpio_event_bits {
1588 u8 reserved_0[0x60]; 1588 u8 reserved_at_0[0x60];
1589 1589
1590 u8 gpio_event_hi[0x20]; 1590 u8 gpio_event_hi[0x20];
1591 1591
1592 u8 gpio_event_lo[0x20]; 1592 u8 gpio_event_lo[0x20];
1593 1593
1594 u8 reserved_1[0x40]; 1594 u8 reserved_at_a0[0x40];
1595}; 1595};
1596 1596
1597struct mlx5_ifc_port_state_change_event_bits { 1597struct mlx5_ifc_port_state_change_event_bits {
1598 u8 reserved_0[0x40]; 1598 u8 reserved_at_0[0x40];
1599 1599
1600 u8 port_num[0x4]; 1600 u8 port_num[0x4];
1601 u8 reserved_1[0x1c]; 1601 u8 reserved_at_44[0x1c];
1602 1602
1603 u8 reserved_2[0x80]; 1603 u8 reserved_at_60[0x80];
1604}; 1604};
1605 1605
1606struct mlx5_ifc_dropped_packet_logged_bits { 1606struct mlx5_ifc_dropped_packet_logged_bits {
1607 u8 reserved_0[0xe0]; 1607 u8 reserved_at_0[0xe0];
1608}; 1608};
1609 1609
1610enum { 1610enum {
@@ -1613,15 +1613,15 @@ enum {
1613}; 1613};
1614 1614
1615struct mlx5_ifc_cq_error_bits { 1615struct mlx5_ifc_cq_error_bits {
1616 u8 reserved_0[0x8]; 1616 u8 reserved_at_0[0x8];
1617 u8 cqn[0x18]; 1617 u8 cqn[0x18];
1618 1618
1619 u8 reserved_1[0x20]; 1619 u8 reserved_at_20[0x20];
1620 1620
1621 u8 reserved_2[0x18]; 1621 u8 reserved_at_40[0x18];
1622 u8 syndrome[0x8]; 1622 u8 syndrome[0x8];
1623 1623
1624 u8 reserved_3[0x80]; 1624 u8 reserved_at_60[0x80];
1625}; 1625};
1626 1626
1627struct mlx5_ifc_rdma_page_fault_event_bits { 1627struct mlx5_ifc_rdma_page_fault_event_bits {
@@ -1629,14 +1629,14 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1629 1629
1630 u8 r_key[0x20]; 1630 u8 r_key[0x20];
1631 1631
1632 u8 reserved_0[0x10]; 1632 u8 reserved_at_40[0x10];
1633 u8 packet_len[0x10]; 1633 u8 packet_len[0x10];
1634 1634
1635 u8 rdma_op_len[0x20]; 1635 u8 rdma_op_len[0x20];
1636 1636
1637 u8 rdma_va[0x40]; 1637 u8 rdma_va[0x40];
1638 1638
1639 u8 reserved_1[0x5]; 1639 u8 reserved_at_c0[0x5];
1640 u8 rdma[0x1]; 1640 u8 rdma[0x1];
1641 u8 write[0x1]; 1641 u8 write[0x1];
1642 u8 requestor[0x1]; 1642 u8 requestor[0x1];
@@ -1646,15 +1646,15 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1646struct mlx5_ifc_wqe_associated_page_fault_event_bits { 1646struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1647 u8 bytes_committed[0x20]; 1647 u8 bytes_committed[0x20];
1648 1648
1649 u8 reserved_0[0x10]; 1649 u8 reserved_at_20[0x10];
1650 u8 wqe_index[0x10]; 1650 u8 wqe_index[0x10];
1651 1651
1652 u8 reserved_1[0x10]; 1652 u8 reserved_at_40[0x10];
1653 u8 len[0x10]; 1653 u8 len[0x10];
1654 1654
1655 u8 reserved_2[0x60]; 1655 u8 reserved_at_60[0x60];
1656 1656
1657 u8 reserved_3[0x5]; 1657 u8 reserved_at_c0[0x5];
1658 u8 rdma[0x1]; 1658 u8 rdma[0x1];
1659 u8 write_read[0x1]; 1659 u8 write_read[0x1];
1660 u8 requestor[0x1]; 1660 u8 requestor[0x1];
@@ -1662,26 +1662,26 @@ struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1662}; 1662};
1663 1663
1664struct mlx5_ifc_qp_events_bits { 1664struct mlx5_ifc_qp_events_bits {
1665 u8 reserved_0[0xa0]; 1665 u8 reserved_at_0[0xa0];
1666 1666
1667 u8 type[0x8]; 1667 u8 type[0x8];
1668 u8 reserved_1[0x18]; 1668 u8 reserved_at_a8[0x18];
1669 1669
1670 u8 reserved_2[0x8]; 1670 u8 reserved_at_c0[0x8];
1671 u8 qpn_rqn_sqn[0x18]; 1671 u8 qpn_rqn_sqn[0x18];
1672}; 1672};
1673 1673
1674struct mlx5_ifc_dct_events_bits { 1674struct mlx5_ifc_dct_events_bits {
1675 u8 reserved_0[0xc0]; 1675 u8 reserved_at_0[0xc0];
1676 1676
1677 u8 reserved_1[0x8]; 1677 u8 reserved_at_c0[0x8];
1678 u8 dct_number[0x18]; 1678 u8 dct_number[0x18];
1679}; 1679};
1680 1680
1681struct mlx5_ifc_comp_event_bits { 1681struct mlx5_ifc_comp_event_bits {
1682 u8 reserved_0[0xc0]; 1682 u8 reserved_at_0[0xc0];
1683 1683
1684 u8 reserved_1[0x8]; 1684 u8 reserved_at_c0[0x8];
1685 u8 cq_number[0x18]; 1685 u8 cq_number[0x18];
1686}; 1686};
1687 1687
@@ -1754,41 +1754,41 @@ enum {
1754 1754
1755struct mlx5_ifc_qpc_bits { 1755struct mlx5_ifc_qpc_bits {
1756 u8 state[0x4]; 1756 u8 state[0x4];
1757 u8 reserved_0[0x4]; 1757 u8 reserved_at_4[0x4];
1758 u8 st[0x8]; 1758 u8 st[0x8];
1759 u8 reserved_1[0x3]; 1759 u8 reserved_at_10[0x3];
1760 u8 pm_state[0x2]; 1760 u8 pm_state[0x2];
1761 u8 reserved_2[0x7]; 1761 u8 reserved_at_15[0x7];
1762 u8 end_padding_mode[0x2]; 1762 u8 end_padding_mode[0x2];
1763 u8 reserved_3[0x2]; 1763 u8 reserved_at_1e[0x2];
1764 1764
1765 u8 wq_signature[0x1]; 1765 u8 wq_signature[0x1];
1766 u8 block_lb_mc[0x1]; 1766 u8 block_lb_mc[0x1];
1767 u8 atomic_like_write_en[0x1]; 1767 u8 atomic_like_write_en[0x1];
1768 u8 latency_sensitive[0x1]; 1768 u8 latency_sensitive[0x1];
1769 u8 reserved_4[0x1]; 1769 u8 reserved_at_24[0x1];
1770 u8 drain_sigerr[0x1]; 1770 u8 drain_sigerr[0x1];
1771 u8 reserved_5[0x2]; 1771 u8 reserved_at_26[0x2];
1772 u8 pd[0x18]; 1772 u8 pd[0x18];
1773 1773
1774 u8 mtu[0x3]; 1774 u8 mtu[0x3];
1775 u8 log_msg_max[0x5]; 1775 u8 log_msg_max[0x5];
1776 u8 reserved_6[0x1]; 1776 u8 reserved_at_48[0x1];
1777 u8 log_rq_size[0x4]; 1777 u8 log_rq_size[0x4];
1778 u8 log_rq_stride[0x3]; 1778 u8 log_rq_stride[0x3];
1779 u8 no_sq[0x1]; 1779 u8 no_sq[0x1];
1780 u8 log_sq_size[0x4]; 1780 u8 log_sq_size[0x4];
1781 u8 reserved_7[0x6]; 1781 u8 reserved_at_55[0x6];
1782 u8 rlky[0x1]; 1782 u8 rlky[0x1];
1783 u8 reserved_8[0x4]; 1783 u8 reserved_at_5c[0x4];
1784 1784
1785 u8 counter_set_id[0x8]; 1785 u8 counter_set_id[0x8];
1786 u8 uar_page[0x18]; 1786 u8 uar_page[0x18];
1787 1787
1788 u8 reserved_9[0x8]; 1788 u8 reserved_at_80[0x8];
1789 u8 user_index[0x18]; 1789 u8 user_index[0x18];
1790 1790
1791 u8 reserved_10[0x3]; 1791 u8 reserved_at_a0[0x3];
1792 u8 log_page_size[0x5]; 1792 u8 log_page_size[0x5];
1793 u8 remote_qpn[0x18]; 1793 u8 remote_qpn[0x18];
1794 1794
@@ -1797,66 +1797,66 @@ struct mlx5_ifc_qpc_bits {
1797 struct mlx5_ifc_ads_bits secondary_address_path; 1797 struct mlx5_ifc_ads_bits secondary_address_path;
1798 1798
1799 u8 log_ack_req_freq[0x4]; 1799 u8 log_ack_req_freq[0x4];
1800 u8 reserved_11[0x4]; 1800 u8 reserved_at_384[0x4];
1801 u8 log_sra_max[0x3]; 1801 u8 log_sra_max[0x3];
1802 u8 reserved_12[0x2]; 1802 u8 reserved_at_38b[0x2];
1803 u8 retry_count[0x3]; 1803 u8 retry_count[0x3];
1804 u8 rnr_retry[0x3]; 1804 u8 rnr_retry[0x3];
1805 u8 reserved_13[0x1]; 1805 u8 reserved_at_393[0x1];
1806 u8 fre[0x1]; 1806 u8 fre[0x1];
1807 u8 cur_rnr_retry[0x3]; 1807 u8 cur_rnr_retry[0x3];
1808 u8 cur_retry_count[0x3]; 1808 u8 cur_retry_count[0x3];
1809 u8 reserved_14[0x5]; 1809 u8 reserved_at_39b[0x5];
1810 1810
1811 u8 reserved_15[0x20]; 1811 u8 reserved_at_3a0[0x20];
1812 1812
1813 u8 reserved_16[0x8]; 1813 u8 reserved_at_3c0[0x8];
1814 u8 next_send_psn[0x18]; 1814 u8 next_send_psn[0x18];
1815 1815
1816 u8 reserved_17[0x8]; 1816 u8 reserved_at_3e0[0x8];
1817 u8 cqn_snd[0x18]; 1817 u8 cqn_snd[0x18];
1818 1818
1819 u8 reserved_18[0x40]; 1819 u8 reserved_at_400[0x40];
1820 1820
1821 u8 reserved_19[0x8]; 1821 u8 reserved_at_440[0x8];
1822 u8 last_acked_psn[0x18]; 1822 u8 last_acked_psn[0x18];
1823 1823
1824 u8 reserved_20[0x8]; 1824 u8 reserved_at_460[0x8];
1825 u8 ssn[0x18]; 1825 u8 ssn[0x18];
1826 1826
1827 u8 reserved_21[0x8]; 1827 u8 reserved_at_480[0x8];
1828 u8 log_rra_max[0x3]; 1828 u8 log_rra_max[0x3];
1829 u8 reserved_22[0x1]; 1829 u8 reserved_at_48b[0x1];
1830 u8 atomic_mode[0x4]; 1830 u8 atomic_mode[0x4];
1831 u8 rre[0x1]; 1831 u8 rre[0x1];
1832 u8 rwe[0x1]; 1832 u8 rwe[0x1];
1833 u8 rae[0x1]; 1833 u8 rae[0x1];
1834 u8 reserved_23[0x1]; 1834 u8 reserved_at_493[0x1];
1835 u8 page_offset[0x6]; 1835 u8 page_offset[0x6];
1836 u8 reserved_24[0x3]; 1836 u8 reserved_at_49a[0x3];
1837 u8 cd_slave_receive[0x1]; 1837 u8 cd_slave_receive[0x1];
1838 u8 cd_slave_send[0x1]; 1838 u8 cd_slave_send[0x1];
1839 u8 cd_master[0x1]; 1839 u8 cd_master[0x1];
1840 1840
1841 u8 reserved_25[0x3]; 1841 u8 reserved_at_4a0[0x3];
1842 u8 min_rnr_nak[0x5]; 1842 u8 min_rnr_nak[0x5];
1843 u8 next_rcv_psn[0x18]; 1843 u8 next_rcv_psn[0x18];
1844 1844
1845 u8 reserved_26[0x8]; 1845 u8 reserved_at_4c0[0x8];
1846 u8 xrcd[0x18]; 1846 u8 xrcd[0x18];
1847 1847
1848 u8 reserved_27[0x8]; 1848 u8 reserved_at_4e0[0x8];
1849 u8 cqn_rcv[0x18]; 1849 u8 cqn_rcv[0x18];
1850 1850
1851 u8 dbr_addr[0x40]; 1851 u8 dbr_addr[0x40];
1852 1852
1853 u8 q_key[0x20]; 1853 u8 q_key[0x20];
1854 1854
1855 u8 reserved_28[0x5]; 1855 u8 reserved_at_560[0x5];
1856 u8 rq_type[0x3]; 1856 u8 rq_type[0x3];
1857 u8 srqn_rmpn[0x18]; 1857 u8 srqn_rmpn[0x18];
1858 1858
1859 u8 reserved_29[0x8]; 1859 u8 reserved_at_580[0x8];
1860 u8 rmsn[0x18]; 1860 u8 rmsn[0x18];
1861 1861
1862 u8 hw_sq_wqebb_counter[0x10]; 1862 u8 hw_sq_wqebb_counter[0x10];
@@ -1866,33 +1866,33 @@ struct mlx5_ifc_qpc_bits {
1866 1866
1867 u8 sw_rq_counter[0x20]; 1867 u8 sw_rq_counter[0x20];
1868 1868
1869 u8 reserved_30[0x20]; 1869 u8 reserved_at_600[0x20];
1870 1870
1871 u8 reserved_31[0xf]; 1871 u8 reserved_at_620[0xf];
1872 u8 cgs[0x1]; 1872 u8 cgs[0x1];
1873 u8 cs_req[0x8]; 1873 u8 cs_req[0x8];
1874 u8 cs_res[0x8]; 1874 u8 cs_res[0x8];
1875 1875
1876 u8 dc_access_key[0x40]; 1876 u8 dc_access_key[0x40];
1877 1877
1878 u8 reserved_32[0xc0]; 1878 u8 reserved_at_680[0xc0];
1879}; 1879};
1880 1880
1881struct mlx5_ifc_roce_addr_layout_bits { 1881struct mlx5_ifc_roce_addr_layout_bits {
1882 u8 source_l3_address[16][0x8]; 1882 u8 source_l3_address[16][0x8];
1883 1883
1884 u8 reserved_0[0x3]; 1884 u8 reserved_at_80[0x3];
1885 u8 vlan_valid[0x1]; 1885 u8 vlan_valid[0x1];
1886 u8 vlan_id[0xc]; 1886 u8 vlan_id[0xc];
1887 u8 source_mac_47_32[0x10]; 1887 u8 source_mac_47_32[0x10];
1888 1888
1889 u8 source_mac_31_0[0x20]; 1889 u8 source_mac_31_0[0x20];
1890 1890
1891 u8 reserved_1[0x14]; 1891 u8 reserved_at_c0[0x14];
1892 u8 roce_l3_type[0x4]; 1892 u8 roce_l3_type[0x4];
1893 u8 roce_version[0x8]; 1893 u8 roce_version[0x8];
1894 1894
1895 u8 reserved_2[0x20]; 1895 u8 reserved_at_e0[0x20];
1896}; 1896};
1897 1897
1898union mlx5_ifc_hca_cap_union_bits { 1898union mlx5_ifc_hca_cap_union_bits {
@@ -1904,7 +1904,7 @@ union mlx5_ifc_hca_cap_union_bits {
1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
1907 u8 reserved_0[0x8000]; 1907 u8 reserved_at_0[0x8000];
1908}; 1908};
1909 1909
1910enum { 1910enum {
@@ -1914,24 +1914,24 @@ enum {
1914}; 1914};
1915 1915
1916struct mlx5_ifc_flow_context_bits { 1916struct mlx5_ifc_flow_context_bits {
1917 u8 reserved_0[0x20]; 1917 u8 reserved_at_0[0x20];
1918 1918
1919 u8 group_id[0x20]; 1919 u8 group_id[0x20];
1920 1920
1921 u8 reserved_1[0x8]; 1921 u8 reserved_at_40[0x8];
1922 u8 flow_tag[0x18]; 1922 u8 flow_tag[0x18];
1923 1923
1924 u8 reserved_2[0x10]; 1924 u8 reserved_at_60[0x10];
1925 u8 action[0x10]; 1925 u8 action[0x10];
1926 1926
1927 u8 reserved_3[0x8]; 1927 u8 reserved_at_80[0x8];
1928 u8 destination_list_size[0x18]; 1928 u8 destination_list_size[0x18];
1929 1929
1930 u8 reserved_4[0x160]; 1930 u8 reserved_at_a0[0x160];
1931 1931
1932 struct mlx5_ifc_fte_match_param_bits match_value; 1932 struct mlx5_ifc_fte_match_param_bits match_value;
1933 1933
1934 u8 reserved_5[0x600]; 1934 u8 reserved_at_1200[0x600];
1935 1935
1936 struct mlx5_ifc_dest_format_struct_bits destination[0]; 1936 struct mlx5_ifc_dest_format_struct_bits destination[0];
1937}; 1937};
@@ -1944,43 +1944,43 @@ enum {
1944struct mlx5_ifc_xrc_srqc_bits { 1944struct mlx5_ifc_xrc_srqc_bits {
1945 u8 state[0x4]; 1945 u8 state[0x4];
1946 u8 log_xrc_srq_size[0x4]; 1946 u8 log_xrc_srq_size[0x4];
1947 u8 reserved_0[0x18]; 1947 u8 reserved_at_8[0x18];
1948 1948
1949 u8 wq_signature[0x1]; 1949 u8 wq_signature[0x1];
1950 u8 cont_srq[0x1]; 1950 u8 cont_srq[0x1];
1951 u8 reserved_1[0x1]; 1951 u8 reserved_at_22[0x1];
1952 u8 rlky[0x1]; 1952 u8 rlky[0x1];
1953 u8 basic_cyclic_rcv_wqe[0x1]; 1953 u8 basic_cyclic_rcv_wqe[0x1];
1954 u8 log_rq_stride[0x3]; 1954 u8 log_rq_stride[0x3];
1955 u8 xrcd[0x18]; 1955 u8 xrcd[0x18];
1956 1956
1957 u8 page_offset[0x6]; 1957 u8 page_offset[0x6];
1958 u8 reserved_2[0x2]; 1958 u8 reserved_at_46[0x2];
1959 u8 cqn[0x18]; 1959 u8 cqn[0x18];
1960 1960
1961 u8 reserved_3[0x20]; 1961 u8 reserved_at_60[0x20];
1962 1962
1963 u8 user_index_equal_xrc_srqn[0x1]; 1963 u8 user_index_equal_xrc_srqn[0x1];
1964 u8 reserved_4[0x1]; 1964 u8 reserved_at_81[0x1];
1965 u8 log_page_size[0x6]; 1965 u8 log_page_size[0x6];
1966 u8 user_index[0x18]; 1966 u8 user_index[0x18];
1967 1967
1968 u8 reserved_5[0x20]; 1968 u8 reserved_at_a0[0x20];
1969 1969
1970 u8 reserved_6[0x8]; 1970 u8 reserved_at_c0[0x8];
1971 u8 pd[0x18]; 1971 u8 pd[0x18];
1972 1972
1973 u8 lwm[0x10]; 1973 u8 lwm[0x10];
1974 u8 wqe_cnt[0x10]; 1974 u8 wqe_cnt[0x10];
1975 1975
1976 u8 reserved_7[0x40]; 1976 u8 reserved_at_100[0x40];
1977 1977
1978 u8 db_record_addr_h[0x20]; 1978 u8 db_record_addr_h[0x20];
1979 1979
1980 u8 db_record_addr_l[0x1e]; 1980 u8 db_record_addr_l[0x1e];
1981 u8 reserved_8[0x2]; 1981 u8 reserved_at_17e[0x2];
1982 1982
1983 u8 reserved_9[0x80]; 1983 u8 reserved_at_180[0x80];
1984}; 1984};
1985 1985
1986struct mlx5_ifc_traffic_counter_bits { 1986struct mlx5_ifc_traffic_counter_bits {
@@ -1990,16 +1990,16 @@ struct mlx5_ifc_traffic_counter_bits {
1990}; 1990};
1991 1991
1992struct mlx5_ifc_tisc_bits { 1992struct mlx5_ifc_tisc_bits {
1993 u8 reserved_0[0xc]; 1993 u8 reserved_at_0[0xc];
1994 u8 prio[0x4]; 1994 u8 prio[0x4];
1995 u8 reserved_1[0x10]; 1995 u8 reserved_at_10[0x10];
1996 1996
1997 u8 reserved_2[0x100]; 1997 u8 reserved_at_20[0x100];
1998 1998
1999 u8 reserved_3[0x8]; 1999 u8 reserved_at_120[0x8];
2000 u8 transport_domain[0x18]; 2000 u8 transport_domain[0x18];
2001 2001
2002 u8 reserved_4[0x3c0]; 2002 u8 reserved_at_140[0x3c0];
2003}; 2003};
2004 2004
2005enum { 2005enum {
@@ -2024,31 +2024,31 @@ enum {
2024}; 2024};
2025 2025
2026struct mlx5_ifc_tirc_bits { 2026struct mlx5_ifc_tirc_bits {
2027 u8 reserved_0[0x20]; 2027 u8 reserved_at_0[0x20];
2028 2028
2029 u8 disp_type[0x4]; 2029 u8 disp_type[0x4];
2030 u8 reserved_1[0x1c]; 2030 u8 reserved_at_24[0x1c];
2031 2031
2032 u8 reserved_2[0x40]; 2032 u8 reserved_at_40[0x40];
2033 2033
2034 u8 reserved_3[0x4]; 2034 u8 reserved_at_80[0x4];
2035 u8 lro_timeout_period_usecs[0x10]; 2035 u8 lro_timeout_period_usecs[0x10];
2036 u8 lro_enable_mask[0x4]; 2036 u8 lro_enable_mask[0x4];
2037 u8 lro_max_ip_payload_size[0x8]; 2037 u8 lro_max_ip_payload_size[0x8];
2038 2038
2039 u8 reserved_4[0x40]; 2039 u8 reserved_at_a0[0x40];
2040 2040
2041 u8 reserved_5[0x8]; 2041 u8 reserved_at_e0[0x8];
2042 u8 inline_rqn[0x18]; 2042 u8 inline_rqn[0x18];
2043 2043
2044 u8 rx_hash_symmetric[0x1]; 2044 u8 rx_hash_symmetric[0x1];
2045 u8 reserved_6[0x1]; 2045 u8 reserved_at_101[0x1];
2046 u8 tunneled_offload_en[0x1]; 2046 u8 tunneled_offload_en[0x1];
2047 u8 reserved_7[0x5]; 2047 u8 reserved_at_103[0x5];
2048 u8 indirect_table[0x18]; 2048 u8 indirect_table[0x18];
2049 2049
2050 u8 rx_hash_fn[0x4]; 2050 u8 rx_hash_fn[0x4];
2051 u8 reserved_8[0x2]; 2051 u8 reserved_at_124[0x2];
2052 u8 self_lb_block[0x2]; 2052 u8 self_lb_block[0x2];
2053 u8 transport_domain[0x18]; 2053 u8 transport_domain[0x18];
2054 2054
@@ -2058,7 +2058,7 @@ struct mlx5_ifc_tirc_bits {
2058 2058
2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; 2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
2060 2060
2061 u8 reserved_9[0x4c0]; 2061 u8 reserved_at_2c0[0x4c0];
2062}; 2062};
2063 2063
2064enum { 2064enum {
@@ -2069,39 +2069,39 @@ enum {
2069struct mlx5_ifc_srqc_bits { 2069struct mlx5_ifc_srqc_bits {
2070 u8 state[0x4]; 2070 u8 state[0x4];
2071 u8 log_srq_size[0x4]; 2071 u8 log_srq_size[0x4];
2072 u8 reserved_0[0x18]; 2072 u8 reserved_at_8[0x18];
2073 2073
2074 u8 wq_signature[0x1]; 2074 u8 wq_signature[0x1];
2075 u8 cont_srq[0x1]; 2075 u8 cont_srq[0x1];
2076 u8 reserved_1[0x1]; 2076 u8 reserved_at_22[0x1];
2077 u8 rlky[0x1]; 2077 u8 rlky[0x1];
2078 u8 reserved_2[0x1]; 2078 u8 reserved_at_24[0x1];
2079 u8 log_rq_stride[0x3]; 2079 u8 log_rq_stride[0x3];
2080 u8 xrcd[0x18]; 2080 u8 xrcd[0x18];
2081 2081
2082 u8 page_offset[0x6]; 2082 u8 page_offset[0x6];
2083 u8 reserved_3[0x2]; 2083 u8 reserved_at_46[0x2];
2084 u8 cqn[0x18]; 2084 u8 cqn[0x18];
2085 2085
2086 u8 reserved_4[0x20]; 2086 u8 reserved_at_60[0x20];
2087 2087
2088 u8 reserved_5[0x2]; 2088 u8 reserved_at_80[0x2];
2089 u8 log_page_size[0x6]; 2089 u8 log_page_size[0x6];
2090 u8 reserved_6[0x18]; 2090 u8 reserved_at_88[0x18];
2091 2091
2092 u8 reserved_7[0x20]; 2092 u8 reserved_at_a0[0x20];
2093 2093
2094 u8 reserved_8[0x8]; 2094 u8 reserved_at_c0[0x8];
2095 u8 pd[0x18]; 2095 u8 pd[0x18];
2096 2096
2097 u8 lwm[0x10]; 2097 u8 lwm[0x10];
2098 u8 wqe_cnt[0x10]; 2098 u8 wqe_cnt[0x10];
2099 2099
2100 u8 reserved_9[0x40]; 2100 u8 reserved_at_100[0x40];
2101 2101
2102 u8 dbr_addr[0x40]; 2102 u8 dbr_addr[0x40];
2103 2103
2104 u8 reserved_10[0x80]; 2104 u8 reserved_at_180[0x80];
2105}; 2105};
2106 2106
2107enum { 2107enum {
@@ -2115,39 +2115,39 @@ struct mlx5_ifc_sqc_bits {
2115 u8 cd_master[0x1]; 2115 u8 cd_master[0x1];
2116 u8 fre[0x1]; 2116 u8 fre[0x1];
2117 u8 flush_in_error_en[0x1]; 2117 u8 flush_in_error_en[0x1];
2118 u8 reserved_0[0x4]; 2118 u8 reserved_at_4[0x4];
2119 u8 state[0x4]; 2119 u8 state[0x4];
2120 u8 reserved_1[0x14]; 2120 u8 reserved_at_c[0x14];
2121 2121
2122 u8 reserved_2[0x8]; 2122 u8 reserved_at_20[0x8];
2123 u8 user_index[0x18]; 2123 u8 user_index[0x18];
2124 2124
2125 u8 reserved_3[0x8]; 2125 u8 reserved_at_40[0x8];
2126 u8 cqn[0x18]; 2126 u8 cqn[0x18];
2127 2127
2128 u8 reserved_4[0xa0]; 2128 u8 reserved_at_60[0xa0];
2129 2129
2130 u8 tis_lst_sz[0x10]; 2130 u8 tis_lst_sz[0x10];
2131 u8 reserved_5[0x10]; 2131 u8 reserved_at_110[0x10];
2132 2132
2133 u8 reserved_6[0x40]; 2133 u8 reserved_at_120[0x40];
2134 2134
2135 u8 reserved_7[0x8]; 2135 u8 reserved_at_160[0x8];
2136 u8 tis_num_0[0x18]; 2136 u8 tis_num_0[0x18];
2137 2137
2138 struct mlx5_ifc_wq_bits wq; 2138 struct mlx5_ifc_wq_bits wq;
2139}; 2139};
2140 2140
2141struct mlx5_ifc_rqtc_bits { 2141struct mlx5_ifc_rqtc_bits {
2142 u8 reserved_0[0xa0]; 2142 u8 reserved_at_0[0xa0];
2143 2143
2144 u8 reserved_1[0x10]; 2144 u8 reserved_at_a0[0x10];
2145 u8 rqt_max_size[0x10]; 2145 u8 rqt_max_size[0x10];
2146 2146
2147 u8 reserved_2[0x10]; 2147 u8 reserved_at_c0[0x10];
2148 u8 rqt_actual_size[0x10]; 2148 u8 rqt_actual_size[0x10];
2149 2149
2150 u8 reserved_3[0x6a0]; 2150 u8 reserved_at_e0[0x6a0];
2151 2151
2152 struct mlx5_ifc_rq_num_bits rq_num[0]; 2152 struct mlx5_ifc_rq_num_bits rq_num[0];
2153}; 2153};
@@ -2165,27 +2165,27 @@ enum {
2165 2165
2166struct mlx5_ifc_rqc_bits { 2166struct mlx5_ifc_rqc_bits {
2167 u8 rlky[0x1]; 2167 u8 rlky[0x1];
2168 u8 reserved_0[0x2]; 2168 u8 reserved_at_1[0x2];
2169 u8 vsd[0x1]; 2169 u8 vsd[0x1];
2170 u8 mem_rq_type[0x4]; 2170 u8 mem_rq_type[0x4];
2171 u8 state[0x4]; 2171 u8 state[0x4];
2172 u8 reserved_1[0x1]; 2172 u8 reserved_at_c[0x1];
2173 u8 flush_in_error_en[0x1]; 2173 u8 flush_in_error_en[0x1];
2174 u8 reserved_2[0x12]; 2174 u8 reserved_at_e[0x12];
2175 2175
2176 u8 reserved_3[0x8]; 2176 u8 reserved_at_20[0x8];
2177 u8 user_index[0x18]; 2177 u8 user_index[0x18];
2178 2178
2179 u8 reserved_4[0x8]; 2179 u8 reserved_at_40[0x8];
2180 u8 cqn[0x18]; 2180 u8 cqn[0x18];
2181 2181
2182 u8 counter_set_id[0x8]; 2182 u8 counter_set_id[0x8];
2183 u8 reserved_5[0x18]; 2183 u8 reserved_at_68[0x18];
2184 2184
2185 u8 reserved_6[0x8]; 2185 u8 reserved_at_80[0x8];
2186 u8 rmpn[0x18]; 2186 u8 rmpn[0x18];
2187 2187
2188 u8 reserved_7[0xe0]; 2188 u8 reserved_at_a0[0xe0];
2189 2189
2190 struct mlx5_ifc_wq_bits wq; 2190 struct mlx5_ifc_wq_bits wq;
2191}; 2191};
@@ -2196,31 +2196,31 @@ enum {
2196}; 2196};
2197 2197
2198struct mlx5_ifc_rmpc_bits { 2198struct mlx5_ifc_rmpc_bits {
2199 u8 reserved_0[0x8]; 2199 u8 reserved_at_0[0x8];
2200 u8 state[0x4]; 2200 u8 state[0x4];
2201 u8 reserved_1[0x14]; 2201 u8 reserved_at_c[0x14];
2202 2202
2203 u8 basic_cyclic_rcv_wqe[0x1]; 2203 u8 basic_cyclic_rcv_wqe[0x1];
2204 u8 reserved_2[0x1f]; 2204 u8 reserved_at_21[0x1f];
2205 2205
2206 u8 reserved_3[0x140]; 2206 u8 reserved_at_40[0x140];
2207 2207
2208 struct mlx5_ifc_wq_bits wq; 2208 struct mlx5_ifc_wq_bits wq;
2209}; 2209};
2210 2210
2211struct mlx5_ifc_nic_vport_context_bits { 2211struct mlx5_ifc_nic_vport_context_bits {
2212 u8 reserved_0[0x1f]; 2212 u8 reserved_at_0[0x1f];
2213 u8 roce_en[0x1]; 2213 u8 roce_en[0x1];
2214 2214
2215 u8 arm_change_event[0x1]; 2215 u8 arm_change_event[0x1];
2216 u8 reserved_1[0x1a]; 2216 u8 reserved_at_21[0x1a];
2217 u8 event_on_mtu[0x1]; 2217 u8 event_on_mtu[0x1];
2218 u8 event_on_promisc_change[0x1]; 2218 u8 event_on_promisc_change[0x1];
2219 u8 event_on_vlan_change[0x1]; 2219 u8 event_on_vlan_change[0x1];
2220 u8 event_on_mc_address_change[0x1]; 2220 u8 event_on_mc_address_change[0x1];
2221 u8 event_on_uc_address_change[0x1]; 2221 u8 event_on_uc_address_change[0x1];
2222 2222
2223 u8 reserved_2[0xf0]; 2223 u8 reserved_at_40[0xf0];
2224 2224
2225 u8 mtu[0x10]; 2225 u8 mtu[0x10];
2226 2226
@@ -2228,21 +2228,21 @@ struct mlx5_ifc_nic_vport_context_bits {
2228 u8 port_guid[0x40]; 2228 u8 port_guid[0x40];
2229 u8 node_guid[0x40]; 2229 u8 node_guid[0x40];
2230 2230
2231 u8 reserved_3[0x140]; 2231 u8 reserved_at_200[0x140];
2232 u8 qkey_violation_counter[0x10]; 2232 u8 qkey_violation_counter[0x10];
2233 u8 reserved_4[0x430]; 2233 u8 reserved_at_350[0x430];
2234 2234
2235 u8 promisc_uc[0x1]; 2235 u8 promisc_uc[0x1];
2236 u8 promisc_mc[0x1]; 2236 u8 promisc_mc[0x1];
2237 u8 promisc_all[0x1]; 2237 u8 promisc_all[0x1];
2238 u8 reserved_5[0x2]; 2238 u8 reserved_at_783[0x2];
2239 u8 allowed_list_type[0x3]; 2239 u8 allowed_list_type[0x3];
2240 u8 reserved_6[0xc]; 2240 u8 reserved_at_788[0xc];
2241 u8 allowed_list_size[0xc]; 2241 u8 allowed_list_size[0xc];
2242 2242
2243 struct mlx5_ifc_mac_address_layout_bits permanent_address; 2243 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2244 2244
2245 u8 reserved_7[0x20]; 2245 u8 reserved_at_7e0[0x20];
2246 2246
2247 u8 current_uc_mac_address[0][0x40]; 2247 u8 current_uc_mac_address[0][0x40];
2248}; 2248};
@@ -2254,9 +2254,9 @@ enum {
2254}; 2254};
2255 2255
2256struct mlx5_ifc_mkc_bits { 2256struct mlx5_ifc_mkc_bits {
2257 u8 reserved_0[0x1]; 2257 u8 reserved_at_0[0x1];
2258 u8 free[0x1]; 2258 u8 free[0x1];
2259 u8 reserved_1[0xd]; 2259 u8 reserved_at_2[0xd];
2260 u8 small_fence_on_rdma_read_response[0x1]; 2260 u8 small_fence_on_rdma_read_response[0x1];
2261 u8 umr_en[0x1]; 2261 u8 umr_en[0x1];
2262 u8 a[0x1]; 2262 u8 a[0x1];
@@ -2265,19 +2265,19 @@ struct mlx5_ifc_mkc_bits {
2265 u8 lw[0x1]; 2265 u8 lw[0x1];
2266 u8 lr[0x1]; 2266 u8 lr[0x1];
2267 u8 access_mode[0x2]; 2267 u8 access_mode[0x2];
2268 u8 reserved_2[0x8]; 2268 u8 reserved_at_18[0x8];
2269 2269
2270 u8 qpn[0x18]; 2270 u8 qpn[0x18];
2271 u8 mkey_7_0[0x8]; 2271 u8 mkey_7_0[0x8];
2272 2272
2273 u8 reserved_3[0x20]; 2273 u8 reserved_at_40[0x20];
2274 2274
2275 u8 length64[0x1]; 2275 u8 length64[0x1];
2276 u8 bsf_en[0x1]; 2276 u8 bsf_en[0x1];
2277 u8 sync_umr[0x1]; 2277 u8 sync_umr[0x1];
2278 u8 reserved_4[0x2]; 2278 u8 reserved_at_63[0x2];
2279 u8 expected_sigerr_count[0x1]; 2279 u8 expected_sigerr_count[0x1];
2280 u8 reserved_5[0x1]; 2280 u8 reserved_at_66[0x1];
2281 u8 en_rinval[0x1]; 2281 u8 en_rinval[0x1];
2282 u8 pd[0x18]; 2282 u8 pd[0x18];
2283 2283
@@ -2287,18 +2287,18 @@ struct mlx5_ifc_mkc_bits {
2287 2287
2288 u8 bsf_octword_size[0x20]; 2288 u8 bsf_octword_size[0x20];
2289 2289
2290 u8 reserved_6[0x80]; 2290 u8 reserved_at_120[0x80];
2291 2291
2292 u8 translations_octword_size[0x20]; 2292 u8 translations_octword_size[0x20];
2293 2293
2294 u8 reserved_7[0x1b]; 2294 u8 reserved_at_1c0[0x1b];
2295 u8 log_page_size[0x5]; 2295 u8 log_page_size[0x5];
2296 2296
2297 u8 reserved_8[0x20]; 2297 u8 reserved_at_1e0[0x20];
2298}; 2298};
2299 2299
2300struct mlx5_ifc_pkey_bits { 2300struct mlx5_ifc_pkey_bits {
2301 u8 reserved_0[0x10]; 2301 u8 reserved_at_0[0x10];
2302 u8 pkey[0x10]; 2302 u8 pkey[0x10];
2303}; 2303};
2304 2304
@@ -2309,19 +2309,19 @@ struct mlx5_ifc_array128_auto_bits {
2309struct mlx5_ifc_hca_vport_context_bits { 2309struct mlx5_ifc_hca_vport_context_bits {
2310 u8 field_select[0x20]; 2310 u8 field_select[0x20];
2311 2311
2312 u8 reserved_0[0xe0]; 2312 u8 reserved_at_20[0xe0];
2313 2313
2314 u8 sm_virt_aware[0x1]; 2314 u8 sm_virt_aware[0x1];
2315 u8 has_smi[0x1]; 2315 u8 has_smi[0x1];
2316 u8 has_raw[0x1]; 2316 u8 has_raw[0x1];
2317 u8 grh_required[0x1]; 2317 u8 grh_required[0x1];
2318 u8 reserved_1[0xc]; 2318 u8 reserved_at_104[0xc];
2319 u8 port_physical_state[0x4]; 2319 u8 port_physical_state[0x4];
2320 u8 vport_state_policy[0x4]; 2320 u8 vport_state_policy[0x4];
2321 u8 port_state[0x4]; 2321 u8 port_state[0x4];
2322 u8 vport_state[0x4]; 2322 u8 vport_state[0x4];
2323 2323
2324 u8 reserved_2[0x20]; 2324 u8 reserved_at_120[0x20];
2325 2325
2326 u8 system_image_guid[0x40]; 2326 u8 system_image_guid[0x40];
2327 2327
@@ -2337,33 +2337,33 @@ struct mlx5_ifc_hca_vport_context_bits {
2337 2337
2338 u8 cap_mask2_field_select[0x20]; 2338 u8 cap_mask2_field_select[0x20];
2339 2339
2340 u8 reserved_3[0x80]; 2340 u8 reserved_at_280[0x80];
2341 2341
2342 u8 lid[0x10]; 2342 u8 lid[0x10];
2343 u8 reserved_4[0x4]; 2343 u8 reserved_at_310[0x4];
2344 u8 init_type_reply[0x4]; 2344 u8 init_type_reply[0x4];
2345 u8 lmc[0x3]; 2345 u8 lmc[0x3];
2346 u8 subnet_timeout[0x5]; 2346 u8 subnet_timeout[0x5];
2347 2347
2348 u8 sm_lid[0x10]; 2348 u8 sm_lid[0x10];
2349 u8 sm_sl[0x4]; 2349 u8 sm_sl[0x4];
2350 u8 reserved_5[0xc]; 2350 u8 reserved_at_334[0xc];
2351 2351
2352 u8 qkey_violation_counter[0x10]; 2352 u8 qkey_violation_counter[0x10];
2353 u8 pkey_violation_counter[0x10]; 2353 u8 pkey_violation_counter[0x10];
2354 2354
2355 u8 reserved_6[0xca0]; 2355 u8 reserved_at_360[0xca0];
2356}; 2356};
2357 2357
2358struct mlx5_ifc_esw_vport_context_bits { 2358struct mlx5_ifc_esw_vport_context_bits {
2359 u8 reserved_0[0x3]; 2359 u8 reserved_at_0[0x3];
2360 u8 vport_svlan_strip[0x1]; 2360 u8 vport_svlan_strip[0x1];
2361 u8 vport_cvlan_strip[0x1]; 2361 u8 vport_cvlan_strip[0x1];
2362 u8 vport_svlan_insert[0x1]; 2362 u8 vport_svlan_insert[0x1];
2363 u8 vport_cvlan_insert[0x2]; 2363 u8 vport_cvlan_insert[0x2];
2364 u8 reserved_1[0x18]; 2364 u8 reserved_at_8[0x18];
2365 2365
2366 u8 reserved_2[0x20]; 2366 u8 reserved_at_20[0x20];
2367 2367
2368 u8 svlan_cfi[0x1]; 2368 u8 svlan_cfi[0x1];
2369 u8 svlan_pcp[0x3]; 2369 u8 svlan_pcp[0x3];
@@ -2372,7 +2372,7 @@ struct mlx5_ifc_esw_vport_context_bits {
2372 u8 cvlan_pcp[0x3]; 2372 u8 cvlan_pcp[0x3];
2373 u8 cvlan_id[0xc]; 2373 u8 cvlan_id[0xc];
2374 2374
2375 u8 reserved_3[0x7a0]; 2375 u8 reserved_at_60[0x7a0];
2376}; 2376};
2377 2377
2378enum { 2378enum {
@@ -2387,41 +2387,41 @@ enum {
2387 2387
2388struct mlx5_ifc_eqc_bits { 2388struct mlx5_ifc_eqc_bits {
2389 u8 status[0x4]; 2389 u8 status[0x4];
2390 u8 reserved_0[0x9]; 2390 u8 reserved_at_4[0x9];
2391 u8 ec[0x1]; 2391 u8 ec[0x1];
2392 u8 oi[0x1]; 2392 u8 oi[0x1];
2393 u8 reserved_1[0x5]; 2393 u8 reserved_at_f[0x5];
2394 u8 st[0x4]; 2394 u8 st[0x4];
2395 u8 reserved_2[0x8]; 2395 u8 reserved_at_18[0x8];
2396 2396
2397 u8 reserved_3[0x20]; 2397 u8 reserved_at_20[0x20];
2398 2398
2399 u8 reserved_4[0x14]; 2399 u8 reserved_at_40[0x14];
2400 u8 page_offset[0x6]; 2400 u8 page_offset[0x6];
2401 u8 reserved_5[0x6]; 2401 u8 reserved_at_5a[0x6];
2402 2402
2403 u8 reserved_6[0x3]; 2403 u8 reserved_at_60[0x3];
2404 u8 log_eq_size[0x5]; 2404 u8 log_eq_size[0x5];
2405 u8 uar_page[0x18]; 2405 u8 uar_page[0x18];
2406 2406
2407 u8 reserved_7[0x20]; 2407 u8 reserved_at_80[0x20];
2408 2408
2409 u8 reserved_8[0x18]; 2409 u8 reserved_at_a0[0x18];
2410 u8 intr[0x8]; 2410 u8 intr[0x8];
2411 2411
2412 u8 reserved_9[0x3]; 2412 u8 reserved_at_c0[0x3];
2413 u8 log_page_size[0x5]; 2413 u8 log_page_size[0x5];
2414 u8 reserved_10[0x18]; 2414 u8 reserved_at_c8[0x18];
2415 2415
2416 u8 reserved_11[0x60]; 2416 u8 reserved_at_e0[0x60];
2417 2417
2418 u8 reserved_12[0x8]; 2418 u8 reserved_at_140[0x8];
2419 u8 consumer_counter[0x18]; 2419 u8 consumer_counter[0x18];
2420 2420
2421 u8 reserved_13[0x8]; 2421 u8 reserved_at_160[0x8];
2422 u8 producer_counter[0x18]; 2422 u8 producer_counter[0x18];
2423 2423
2424 u8 reserved_14[0x80]; 2424 u8 reserved_at_180[0x80];
2425}; 2425};
2426 2426
2427enum { 2427enum {
@@ -2445,14 +2445,14 @@ enum {
2445}; 2445};
2446 2446
2447struct mlx5_ifc_dctc_bits { 2447struct mlx5_ifc_dctc_bits {
2448 u8 reserved_0[0x4]; 2448 u8 reserved_at_0[0x4];
2449 u8 state[0x4]; 2449 u8 state[0x4];
2450 u8 reserved_1[0x18]; 2450 u8 reserved_at_8[0x18];
2451 2451
2452 u8 reserved_2[0x8]; 2452 u8 reserved_at_20[0x8];
2453 u8 user_index[0x18]; 2453 u8 user_index[0x18];
2454 2454
2455 u8 reserved_3[0x8]; 2455 u8 reserved_at_40[0x8];
2456 u8 cqn[0x18]; 2456 u8 cqn[0x18];
2457 2457
2458 u8 counter_set_id[0x8]; 2458 u8 counter_set_id[0x8];
@@ -2464,45 +2464,45 @@ struct mlx5_ifc_dctc_bits {
2464 u8 latency_sensitive[0x1]; 2464 u8 latency_sensitive[0x1];
2465 u8 rlky[0x1]; 2465 u8 rlky[0x1];
2466 u8 free_ar[0x1]; 2466 u8 free_ar[0x1];
2467 u8 reserved_4[0xd]; 2467 u8 reserved_at_73[0xd];
2468 2468
2469 u8 reserved_5[0x8]; 2469 u8 reserved_at_80[0x8];
2470 u8 cs_res[0x8]; 2470 u8 cs_res[0x8];
2471 u8 reserved_6[0x3]; 2471 u8 reserved_at_90[0x3];
2472 u8 min_rnr_nak[0x5]; 2472 u8 min_rnr_nak[0x5];
2473 u8 reserved_7[0x8]; 2473 u8 reserved_at_98[0x8];
2474 2474
2475 u8 reserved_8[0x8]; 2475 u8 reserved_at_a0[0x8];
2476 u8 srqn[0x18]; 2476 u8 srqn[0x18];
2477 2477
2478 u8 reserved_9[0x8]; 2478 u8 reserved_at_c0[0x8];
2479 u8 pd[0x18]; 2479 u8 pd[0x18];
2480 2480
2481 u8 tclass[0x8]; 2481 u8 tclass[0x8];
2482 u8 reserved_10[0x4]; 2482 u8 reserved_at_e8[0x4];
2483 u8 flow_label[0x14]; 2483 u8 flow_label[0x14];
2484 2484
2485 u8 dc_access_key[0x40]; 2485 u8 dc_access_key[0x40];
2486 2486
2487 u8 reserved_11[0x5]; 2487 u8 reserved_at_140[0x5];
2488 u8 mtu[0x3]; 2488 u8 mtu[0x3];
2489 u8 port[0x8]; 2489 u8 port[0x8];
2490 u8 pkey_index[0x10]; 2490 u8 pkey_index[0x10];
2491 2491
2492 u8 reserved_12[0x8]; 2492 u8 reserved_at_160[0x8];
2493 u8 my_addr_index[0x8]; 2493 u8 my_addr_index[0x8];
2494 u8 reserved_13[0x8]; 2494 u8 reserved_at_170[0x8];
2495 u8 hop_limit[0x8]; 2495 u8 hop_limit[0x8];
2496 2496
2497 u8 dc_access_key_violation_count[0x20]; 2497 u8 dc_access_key_violation_count[0x20];
2498 2498
2499 u8 reserved_14[0x14]; 2499 u8 reserved_at_1a0[0x14];
2500 u8 dei_cfi[0x1]; 2500 u8 dei_cfi[0x1];
2501 u8 eth_prio[0x3]; 2501 u8 eth_prio[0x3];
2502 u8 ecn[0x2]; 2502 u8 ecn[0x2];
2503 u8 dscp[0x6]; 2503 u8 dscp[0x6];
2504 2504
2505 u8 reserved_15[0x40]; 2505 u8 reserved_at_1c0[0x40];
2506}; 2506};
2507 2507
2508enum { 2508enum {
@@ -2524,54 +2524,54 @@ enum {
2524 2524
2525struct mlx5_ifc_cqc_bits { 2525struct mlx5_ifc_cqc_bits {
2526 u8 status[0x4]; 2526 u8 status[0x4];
2527 u8 reserved_0[0x4]; 2527 u8 reserved_at_4[0x4];
2528 u8 cqe_sz[0x3]; 2528 u8 cqe_sz[0x3];
2529 u8 cc[0x1]; 2529 u8 cc[0x1];
2530 u8 reserved_1[0x1]; 2530 u8 reserved_at_c[0x1];
2531 u8 scqe_break_moderation_en[0x1]; 2531 u8 scqe_break_moderation_en[0x1];
2532 u8 oi[0x1]; 2532 u8 oi[0x1];
2533 u8 reserved_2[0x2]; 2533 u8 reserved_at_f[0x2];
2534 u8 cqe_zip_en[0x1]; 2534 u8 cqe_zip_en[0x1];
2535 u8 mini_cqe_res_format[0x2]; 2535 u8 mini_cqe_res_format[0x2];
2536 u8 st[0x4]; 2536 u8 st[0x4];
2537 u8 reserved_3[0x8]; 2537 u8 reserved_at_18[0x8];
2538 2538
2539 u8 reserved_4[0x20]; 2539 u8 reserved_at_20[0x20];
2540 2540
2541 u8 reserved_5[0x14]; 2541 u8 reserved_at_40[0x14];
2542 u8 page_offset[0x6]; 2542 u8 page_offset[0x6];
2543 u8 reserved_6[0x6]; 2543 u8 reserved_at_5a[0x6];
2544 2544
2545 u8 reserved_7[0x3]; 2545 u8 reserved_at_60[0x3];
2546 u8 log_cq_size[0x5]; 2546 u8 log_cq_size[0x5];
2547 u8 uar_page[0x18]; 2547 u8 uar_page[0x18];
2548 2548
2549 u8 reserved_8[0x4]; 2549 u8 reserved_at_80[0x4];
2550 u8 cq_period[0xc]; 2550 u8 cq_period[0xc];
2551 u8 cq_max_count[0x10]; 2551 u8 cq_max_count[0x10];
2552 2552
2553 u8 reserved_9[0x18]; 2553 u8 reserved_at_a0[0x18];
2554 u8 c_eqn[0x8]; 2554 u8 c_eqn[0x8];
2555 2555
2556 u8 reserved_10[0x3]; 2556 u8 reserved_at_c0[0x3];
2557 u8 log_page_size[0x5]; 2557 u8 log_page_size[0x5];
2558 u8 reserved_11[0x18]; 2558 u8 reserved_at_c8[0x18];
2559 2559
2560 u8 reserved_12[0x20]; 2560 u8 reserved_at_e0[0x20];
2561 2561
2562 u8 reserved_13[0x8]; 2562 u8 reserved_at_100[0x8];
2563 u8 last_notified_index[0x18]; 2563 u8 last_notified_index[0x18];
2564 2564
2565 u8 reserved_14[0x8]; 2565 u8 reserved_at_120[0x8];
2566 u8 last_solicit_index[0x18]; 2566 u8 last_solicit_index[0x18];
2567 2567
2568 u8 reserved_15[0x8]; 2568 u8 reserved_at_140[0x8];
2569 u8 consumer_counter[0x18]; 2569 u8 consumer_counter[0x18];
2570 2570
2571 u8 reserved_16[0x8]; 2571 u8 reserved_at_160[0x8];
2572 u8 producer_counter[0x18]; 2572 u8 producer_counter[0x18];
2573 2573
2574 u8 reserved_17[0x40]; 2574 u8 reserved_at_180[0x40];
2575 2575
2576 u8 dbr_addr[0x40]; 2576 u8 dbr_addr[0x40];
2577}; 2577};
@@ -2580,16 +2580,16 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; 2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; 2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; 2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
2583 u8 reserved_0[0x800]; 2583 u8 reserved_at_0[0x800];
2584}; 2584};
2585 2585
2586struct mlx5_ifc_query_adapter_param_block_bits { 2586struct mlx5_ifc_query_adapter_param_block_bits {
2587 u8 reserved_0[0xc0]; 2587 u8 reserved_at_0[0xc0];
2588 2588
2589 u8 reserved_1[0x8]; 2589 u8 reserved_at_c0[0x8];
2590 u8 ieee_vendor_id[0x18]; 2590 u8 ieee_vendor_id[0x18];
2591 2591
2592 u8 reserved_2[0x10]; 2592 u8 reserved_at_e0[0x10];
2593 u8 vsd_vendor_id[0x10]; 2593 u8 vsd_vendor_id[0x10];
2594 2594
2595 u8 vsd[208][0x8]; 2595 u8 vsd[208][0x8];
@@ -2600,14 +2600,14 @@ struct mlx5_ifc_query_adapter_param_block_bits {
2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { 2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2601 struct mlx5_ifc_modify_field_select_bits modify_field_select; 2601 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2602 struct mlx5_ifc_resize_field_select_bits resize_field_select; 2602 struct mlx5_ifc_resize_field_select_bits resize_field_select;
2603 u8 reserved_0[0x20]; 2603 u8 reserved_at_0[0x20];
2604}; 2604};
2605 2605
2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits { 2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; 2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; 2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; 2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
2610 u8 reserved_0[0x20]; 2610 u8 reserved_at_0[0x20];
2611}; 2611};
2612 2612
2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { 2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
@@ -2619,7 +2619,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; 2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; 2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2622 u8 reserved_0[0x7c0]; 2622 u8 reserved_at_0[0x7c0];
2623}; 2623};
2624 2624
2625union mlx5_ifc_event_auto_bits { 2625union mlx5_ifc_event_auto_bits {
@@ -2635,23 +2635,23 @@ union mlx5_ifc_event_auto_bits {
2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; 2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event; 2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; 2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
2638 u8 reserved_0[0xe0]; 2638 u8 reserved_at_0[0xe0];
2639}; 2639};
2640 2640
2641struct mlx5_ifc_health_buffer_bits { 2641struct mlx5_ifc_health_buffer_bits {
2642 u8 reserved_0[0x100]; 2642 u8 reserved_at_0[0x100];
2643 2643
2644 u8 assert_existptr[0x20]; 2644 u8 assert_existptr[0x20];
2645 2645
2646 u8 assert_callra[0x20]; 2646 u8 assert_callra[0x20];
2647 2647
2648 u8 reserved_1[0x40]; 2648 u8 reserved_at_140[0x40];
2649 2649
2650 u8 fw_version[0x20]; 2650 u8 fw_version[0x20];
2651 2651
2652 u8 hw_id[0x20]; 2652 u8 hw_id[0x20];
2653 2653
2654 u8 reserved_2[0x20]; 2654 u8 reserved_at_1c0[0x20];
2655 2655
2656 u8 irisc_index[0x8]; 2656 u8 irisc_index[0x8];
2657 u8 synd[0x8]; 2657 u8 synd[0x8];
@@ -2660,20 +2660,20 @@ struct mlx5_ifc_health_buffer_bits {
2660 2660
2661struct mlx5_ifc_register_loopback_control_bits { 2661struct mlx5_ifc_register_loopback_control_bits {
2662 u8 no_lb[0x1]; 2662 u8 no_lb[0x1];
2663 u8 reserved_0[0x7]; 2663 u8 reserved_at_1[0x7];
2664 u8 port[0x8]; 2664 u8 port[0x8];
2665 u8 reserved_1[0x10]; 2665 u8 reserved_at_10[0x10];
2666 2666
2667 u8 reserved_2[0x60]; 2667 u8 reserved_at_20[0x60];
2668}; 2668};
2669 2669
2670struct mlx5_ifc_teardown_hca_out_bits { 2670struct mlx5_ifc_teardown_hca_out_bits {
2671 u8 status[0x8]; 2671 u8 status[0x8];
2672 u8 reserved_0[0x18]; 2672 u8 reserved_at_8[0x18];
2673 2673
2674 u8 syndrome[0x20]; 2674 u8 syndrome[0x20];
2675 2675
2676 u8 reserved_1[0x40]; 2676 u8 reserved_at_40[0x40];
2677}; 2677};
2678 2678
2679enum { 2679enum {
@@ -2683,108 +2683,108 @@ enum {
2683 2683
2684struct mlx5_ifc_teardown_hca_in_bits { 2684struct mlx5_ifc_teardown_hca_in_bits {
2685 u8 opcode[0x10]; 2685 u8 opcode[0x10];
2686 u8 reserved_0[0x10]; 2686 u8 reserved_at_10[0x10];
2687 2687
2688 u8 reserved_1[0x10]; 2688 u8 reserved_at_20[0x10];
2689 u8 op_mod[0x10]; 2689 u8 op_mod[0x10];
2690 2690
2691 u8 reserved_2[0x10]; 2691 u8 reserved_at_40[0x10];
2692 u8 profile[0x10]; 2692 u8 profile[0x10];
2693 2693
2694 u8 reserved_3[0x20]; 2694 u8 reserved_at_60[0x20];
2695}; 2695};
2696 2696
2697struct mlx5_ifc_sqerr2rts_qp_out_bits { 2697struct mlx5_ifc_sqerr2rts_qp_out_bits {
2698 u8 status[0x8]; 2698 u8 status[0x8];
2699 u8 reserved_0[0x18]; 2699 u8 reserved_at_8[0x18];
2700 2700
2701 u8 syndrome[0x20]; 2701 u8 syndrome[0x20];
2702 2702
2703 u8 reserved_1[0x40]; 2703 u8 reserved_at_40[0x40];
2704}; 2704};
2705 2705
2706struct mlx5_ifc_sqerr2rts_qp_in_bits { 2706struct mlx5_ifc_sqerr2rts_qp_in_bits {
2707 u8 opcode[0x10]; 2707 u8 opcode[0x10];
2708 u8 reserved_0[0x10]; 2708 u8 reserved_at_10[0x10];
2709 2709
2710 u8 reserved_1[0x10]; 2710 u8 reserved_at_20[0x10];
2711 u8 op_mod[0x10]; 2711 u8 op_mod[0x10];
2712 2712
2713 u8 reserved_2[0x8]; 2713 u8 reserved_at_40[0x8];
2714 u8 qpn[0x18]; 2714 u8 qpn[0x18];
2715 2715
2716 u8 reserved_3[0x20]; 2716 u8 reserved_at_60[0x20];
2717 2717
2718 u8 opt_param_mask[0x20]; 2718 u8 opt_param_mask[0x20];
2719 2719
2720 u8 reserved_4[0x20]; 2720 u8 reserved_at_a0[0x20];
2721 2721
2722 struct mlx5_ifc_qpc_bits qpc; 2722 struct mlx5_ifc_qpc_bits qpc;
2723 2723
2724 u8 reserved_5[0x80]; 2724 u8 reserved_at_800[0x80];
2725}; 2725};
2726 2726
2727struct mlx5_ifc_sqd2rts_qp_out_bits { 2727struct mlx5_ifc_sqd2rts_qp_out_bits {
2728 u8 status[0x8]; 2728 u8 status[0x8];
2729 u8 reserved_0[0x18]; 2729 u8 reserved_at_8[0x18];
2730 2730
2731 u8 syndrome[0x20]; 2731 u8 syndrome[0x20];
2732 2732
2733 u8 reserved_1[0x40]; 2733 u8 reserved_at_40[0x40];
2734}; 2734};
2735 2735
2736struct mlx5_ifc_sqd2rts_qp_in_bits { 2736struct mlx5_ifc_sqd2rts_qp_in_bits {
2737 u8 opcode[0x10]; 2737 u8 opcode[0x10];
2738 u8 reserved_0[0x10]; 2738 u8 reserved_at_10[0x10];
2739 2739
2740 u8 reserved_1[0x10]; 2740 u8 reserved_at_20[0x10];
2741 u8 op_mod[0x10]; 2741 u8 op_mod[0x10];
2742 2742
2743 u8 reserved_2[0x8]; 2743 u8 reserved_at_40[0x8];
2744 u8 qpn[0x18]; 2744 u8 qpn[0x18];
2745 2745
2746 u8 reserved_3[0x20]; 2746 u8 reserved_at_60[0x20];
2747 2747
2748 u8 opt_param_mask[0x20]; 2748 u8 opt_param_mask[0x20];
2749 2749
2750 u8 reserved_4[0x20]; 2750 u8 reserved_at_a0[0x20];
2751 2751
2752 struct mlx5_ifc_qpc_bits qpc; 2752 struct mlx5_ifc_qpc_bits qpc;
2753 2753
2754 u8 reserved_5[0x80]; 2754 u8 reserved_at_800[0x80];
2755}; 2755};
2756 2756
2757struct mlx5_ifc_set_roce_address_out_bits { 2757struct mlx5_ifc_set_roce_address_out_bits {
2758 u8 status[0x8]; 2758 u8 status[0x8];
2759 u8 reserved_0[0x18]; 2759 u8 reserved_at_8[0x18];
2760 2760
2761 u8 syndrome[0x20]; 2761 u8 syndrome[0x20];
2762 2762
2763 u8 reserved_1[0x40]; 2763 u8 reserved_at_40[0x40];
2764}; 2764};
2765 2765
2766struct mlx5_ifc_set_roce_address_in_bits { 2766struct mlx5_ifc_set_roce_address_in_bits {
2767 u8 opcode[0x10]; 2767 u8 opcode[0x10];
2768 u8 reserved_0[0x10]; 2768 u8 reserved_at_10[0x10];
2769 2769
2770 u8 reserved_1[0x10]; 2770 u8 reserved_at_20[0x10];
2771 u8 op_mod[0x10]; 2771 u8 op_mod[0x10];
2772 2772
2773 u8 roce_address_index[0x10]; 2773 u8 roce_address_index[0x10];
2774 u8 reserved_2[0x10]; 2774 u8 reserved_at_50[0x10];
2775 2775
2776 u8 reserved_3[0x20]; 2776 u8 reserved_at_60[0x20];
2777 2777
2778 struct mlx5_ifc_roce_addr_layout_bits roce_address; 2778 struct mlx5_ifc_roce_addr_layout_bits roce_address;
2779}; 2779};
2780 2780
2781struct mlx5_ifc_set_mad_demux_out_bits { 2781struct mlx5_ifc_set_mad_demux_out_bits {
2782 u8 status[0x8]; 2782 u8 status[0x8];
2783 u8 reserved_0[0x18]; 2783 u8 reserved_at_8[0x18];
2784 2784
2785 u8 syndrome[0x20]; 2785 u8 syndrome[0x20];
2786 2786
2787 u8 reserved_1[0x40]; 2787 u8 reserved_at_40[0x40];
2788}; 2788};
2789 2789
2790enum { 2790enum {
@@ -2794,89 +2794,89 @@ enum {
2794 2794
2795struct mlx5_ifc_set_mad_demux_in_bits { 2795struct mlx5_ifc_set_mad_demux_in_bits {
2796 u8 opcode[0x10]; 2796 u8 opcode[0x10];
2797 u8 reserved_0[0x10]; 2797 u8 reserved_at_10[0x10];
2798 2798
2799 u8 reserved_1[0x10]; 2799 u8 reserved_at_20[0x10];
2800 u8 op_mod[0x10]; 2800 u8 op_mod[0x10];
2801 2801
2802 u8 reserved_2[0x20]; 2802 u8 reserved_at_40[0x20];
2803 2803
2804 u8 reserved_3[0x6]; 2804 u8 reserved_at_60[0x6];
2805 u8 demux_mode[0x2]; 2805 u8 demux_mode[0x2];
2806 u8 reserved_4[0x18]; 2806 u8 reserved_at_68[0x18];
2807}; 2807};
2808 2808
2809struct mlx5_ifc_set_l2_table_entry_out_bits { 2809struct mlx5_ifc_set_l2_table_entry_out_bits {
2810 u8 status[0x8]; 2810 u8 status[0x8];
2811 u8 reserved_0[0x18]; 2811 u8 reserved_at_8[0x18];
2812 2812
2813 u8 syndrome[0x20]; 2813 u8 syndrome[0x20];
2814 2814
2815 u8 reserved_1[0x40]; 2815 u8 reserved_at_40[0x40];
2816}; 2816};
2817 2817
2818struct mlx5_ifc_set_l2_table_entry_in_bits { 2818struct mlx5_ifc_set_l2_table_entry_in_bits {
2819 u8 opcode[0x10]; 2819 u8 opcode[0x10];
2820 u8 reserved_0[0x10]; 2820 u8 reserved_at_10[0x10];
2821 2821
2822 u8 reserved_1[0x10]; 2822 u8 reserved_at_20[0x10];
2823 u8 op_mod[0x10]; 2823 u8 op_mod[0x10];
2824 2824
2825 u8 reserved_2[0x60]; 2825 u8 reserved_at_40[0x60];
2826 2826
2827 u8 reserved_3[0x8]; 2827 u8 reserved_at_a0[0x8];
2828 u8 table_index[0x18]; 2828 u8 table_index[0x18];
2829 2829
2830 u8 reserved_4[0x20]; 2830 u8 reserved_at_c0[0x20];
2831 2831
2832 u8 reserved_5[0x13]; 2832 u8 reserved_at_e0[0x13];
2833 u8 vlan_valid[0x1]; 2833 u8 vlan_valid[0x1];
2834 u8 vlan[0xc]; 2834 u8 vlan[0xc];
2835 2835
2836 struct mlx5_ifc_mac_address_layout_bits mac_address; 2836 struct mlx5_ifc_mac_address_layout_bits mac_address;
2837 2837
2838 u8 reserved_6[0xc0]; 2838 u8 reserved_at_140[0xc0];
2839}; 2839};
2840 2840
2841struct mlx5_ifc_set_issi_out_bits { 2841struct mlx5_ifc_set_issi_out_bits {
2842 u8 status[0x8]; 2842 u8 status[0x8];
2843 u8 reserved_0[0x18]; 2843 u8 reserved_at_8[0x18];
2844 2844
2845 u8 syndrome[0x20]; 2845 u8 syndrome[0x20];
2846 2846
2847 u8 reserved_1[0x40]; 2847 u8 reserved_at_40[0x40];
2848}; 2848};
2849 2849
2850struct mlx5_ifc_set_issi_in_bits { 2850struct mlx5_ifc_set_issi_in_bits {
2851 u8 opcode[0x10]; 2851 u8 opcode[0x10];
2852 u8 reserved_0[0x10]; 2852 u8 reserved_at_10[0x10];
2853 2853
2854 u8 reserved_1[0x10]; 2854 u8 reserved_at_20[0x10];
2855 u8 op_mod[0x10]; 2855 u8 op_mod[0x10];
2856 2856
2857 u8 reserved_2[0x10]; 2857 u8 reserved_at_40[0x10];
2858 u8 current_issi[0x10]; 2858 u8 current_issi[0x10];
2859 2859
2860 u8 reserved_3[0x20]; 2860 u8 reserved_at_60[0x20];
2861}; 2861};
2862 2862
2863struct mlx5_ifc_set_hca_cap_out_bits { 2863struct mlx5_ifc_set_hca_cap_out_bits {
2864 u8 status[0x8]; 2864 u8 status[0x8];
2865 u8 reserved_0[0x18]; 2865 u8 reserved_at_8[0x18];
2866 2866
2867 u8 syndrome[0x20]; 2867 u8 syndrome[0x20];
2868 2868
2869 u8 reserved_1[0x40]; 2869 u8 reserved_at_40[0x40];
2870}; 2870};
2871 2871
2872struct mlx5_ifc_set_hca_cap_in_bits { 2872struct mlx5_ifc_set_hca_cap_in_bits {
2873 u8 opcode[0x10]; 2873 u8 opcode[0x10];
2874 u8 reserved_0[0x10]; 2874 u8 reserved_at_10[0x10];
2875 2875
2876 u8 reserved_1[0x10]; 2876 u8 reserved_at_20[0x10];
2877 u8 op_mod[0x10]; 2877 u8 op_mod[0x10];
2878 2878
2879 u8 reserved_2[0x40]; 2879 u8 reserved_at_40[0x40];
2880 2880
2881 union mlx5_ifc_hca_cap_union_bits capability; 2881 union mlx5_ifc_hca_cap_union_bits capability;
2882}; 2882};
@@ -2890,156 +2890,156 @@ enum {
2890 2890
2891struct mlx5_ifc_set_fte_out_bits { 2891struct mlx5_ifc_set_fte_out_bits {
2892 u8 status[0x8]; 2892 u8 status[0x8];
2893 u8 reserved_0[0x18]; 2893 u8 reserved_at_8[0x18];
2894 2894
2895 u8 syndrome[0x20]; 2895 u8 syndrome[0x20];
2896 2896
2897 u8 reserved_1[0x40]; 2897 u8 reserved_at_40[0x40];
2898}; 2898};
2899 2899
2900struct mlx5_ifc_set_fte_in_bits { 2900struct mlx5_ifc_set_fte_in_bits {
2901 u8 opcode[0x10]; 2901 u8 opcode[0x10];
2902 u8 reserved_0[0x10]; 2902 u8 reserved_at_10[0x10];
2903 2903
2904 u8 reserved_1[0x10]; 2904 u8 reserved_at_20[0x10];
2905 u8 op_mod[0x10]; 2905 u8 op_mod[0x10];
2906 2906
2907 u8 reserved_2[0x40]; 2907 u8 reserved_at_40[0x40];
2908 2908
2909 u8 table_type[0x8]; 2909 u8 table_type[0x8];
2910 u8 reserved_3[0x18]; 2910 u8 reserved_at_88[0x18];
2911 2911
2912 u8 reserved_4[0x8]; 2912 u8 reserved_at_a0[0x8];
2913 u8 table_id[0x18]; 2913 u8 table_id[0x18];
2914 2914
2915 u8 reserved_5[0x18]; 2915 u8 reserved_at_c0[0x18];
2916 u8 modify_enable_mask[0x8]; 2916 u8 modify_enable_mask[0x8];
2917 2917
2918 u8 reserved_6[0x20]; 2918 u8 reserved_at_e0[0x20];
2919 2919
2920 u8 flow_index[0x20]; 2920 u8 flow_index[0x20];
2921 2921
2922 u8 reserved_7[0xe0]; 2922 u8 reserved_at_120[0xe0];
2923 2923
2924 struct mlx5_ifc_flow_context_bits flow_context; 2924 struct mlx5_ifc_flow_context_bits flow_context;
2925}; 2925};
2926 2926
2927struct mlx5_ifc_rts2rts_qp_out_bits { 2927struct mlx5_ifc_rts2rts_qp_out_bits {
2928 u8 status[0x8]; 2928 u8 status[0x8];
2929 u8 reserved_0[0x18]; 2929 u8 reserved_at_8[0x18];
2930 2930
2931 u8 syndrome[0x20]; 2931 u8 syndrome[0x20];
2932 2932
2933 u8 reserved_1[0x40]; 2933 u8 reserved_at_40[0x40];
2934}; 2934};
2935 2935
2936struct mlx5_ifc_rts2rts_qp_in_bits { 2936struct mlx5_ifc_rts2rts_qp_in_bits {
2937 u8 opcode[0x10]; 2937 u8 opcode[0x10];
2938 u8 reserved_0[0x10]; 2938 u8 reserved_at_10[0x10];
2939 2939
2940 u8 reserved_1[0x10]; 2940 u8 reserved_at_20[0x10];
2941 u8 op_mod[0x10]; 2941 u8 op_mod[0x10];
2942 2942
2943 u8 reserved_2[0x8]; 2943 u8 reserved_at_40[0x8];
2944 u8 qpn[0x18]; 2944 u8 qpn[0x18];
2945 2945
2946 u8 reserved_3[0x20]; 2946 u8 reserved_at_60[0x20];
2947 2947
2948 u8 opt_param_mask[0x20]; 2948 u8 opt_param_mask[0x20];
2949 2949
2950 u8 reserved_4[0x20]; 2950 u8 reserved_at_a0[0x20];
2951 2951
2952 struct mlx5_ifc_qpc_bits qpc; 2952 struct mlx5_ifc_qpc_bits qpc;
2953 2953
2954 u8 reserved_5[0x80]; 2954 u8 reserved_at_800[0x80];
2955}; 2955};
2956 2956
2957struct mlx5_ifc_rtr2rts_qp_out_bits { 2957struct mlx5_ifc_rtr2rts_qp_out_bits {
2958 u8 status[0x8]; 2958 u8 status[0x8];
2959 u8 reserved_0[0x18]; 2959 u8 reserved_at_8[0x18];
2960 2960
2961 u8 syndrome[0x20]; 2961 u8 syndrome[0x20];
2962 2962
2963 u8 reserved_1[0x40]; 2963 u8 reserved_at_40[0x40];
2964}; 2964};
2965 2965
2966struct mlx5_ifc_rtr2rts_qp_in_bits { 2966struct mlx5_ifc_rtr2rts_qp_in_bits {
2967 u8 opcode[0x10]; 2967 u8 opcode[0x10];
2968 u8 reserved_0[0x10]; 2968 u8 reserved_at_10[0x10];
2969 2969
2970 u8 reserved_1[0x10]; 2970 u8 reserved_at_20[0x10];
2971 u8 op_mod[0x10]; 2971 u8 op_mod[0x10];
2972 2972
2973 u8 reserved_2[0x8]; 2973 u8 reserved_at_40[0x8];
2974 u8 qpn[0x18]; 2974 u8 qpn[0x18];
2975 2975
2976 u8 reserved_3[0x20]; 2976 u8 reserved_at_60[0x20];
2977 2977
2978 u8 opt_param_mask[0x20]; 2978 u8 opt_param_mask[0x20];
2979 2979
2980 u8 reserved_4[0x20]; 2980 u8 reserved_at_a0[0x20];
2981 2981
2982 struct mlx5_ifc_qpc_bits qpc; 2982 struct mlx5_ifc_qpc_bits qpc;
2983 2983
2984 u8 reserved_5[0x80]; 2984 u8 reserved_at_800[0x80];
2985}; 2985};
2986 2986
2987struct mlx5_ifc_rst2init_qp_out_bits { 2987struct mlx5_ifc_rst2init_qp_out_bits {
2988 u8 status[0x8]; 2988 u8 status[0x8];
2989 u8 reserved_0[0x18]; 2989 u8 reserved_at_8[0x18];
2990 2990
2991 u8 syndrome[0x20]; 2991 u8 syndrome[0x20];
2992 2992
2993 u8 reserved_1[0x40]; 2993 u8 reserved_at_40[0x40];
2994}; 2994};
2995 2995
2996struct mlx5_ifc_rst2init_qp_in_bits { 2996struct mlx5_ifc_rst2init_qp_in_bits {
2997 u8 opcode[0x10]; 2997 u8 opcode[0x10];
2998 u8 reserved_0[0x10]; 2998 u8 reserved_at_10[0x10];
2999 2999
3000 u8 reserved_1[0x10]; 3000 u8 reserved_at_20[0x10];
3001 u8 op_mod[0x10]; 3001 u8 op_mod[0x10];
3002 3002
3003 u8 reserved_2[0x8]; 3003 u8 reserved_at_40[0x8];
3004 u8 qpn[0x18]; 3004 u8 qpn[0x18];
3005 3005
3006 u8 reserved_3[0x20]; 3006 u8 reserved_at_60[0x20];
3007 3007
3008 u8 opt_param_mask[0x20]; 3008 u8 opt_param_mask[0x20];
3009 3009
3010 u8 reserved_4[0x20]; 3010 u8 reserved_at_a0[0x20];
3011 3011
3012 struct mlx5_ifc_qpc_bits qpc; 3012 struct mlx5_ifc_qpc_bits qpc;
3013 3013
3014 u8 reserved_5[0x80]; 3014 u8 reserved_at_800[0x80];
3015}; 3015};
3016 3016
3017struct mlx5_ifc_query_xrc_srq_out_bits { 3017struct mlx5_ifc_query_xrc_srq_out_bits {
3018 u8 status[0x8]; 3018 u8 status[0x8];
3019 u8 reserved_0[0x18]; 3019 u8 reserved_at_8[0x18];
3020 3020
3021 u8 syndrome[0x20]; 3021 u8 syndrome[0x20];
3022 3022
3023 u8 reserved_1[0x40]; 3023 u8 reserved_at_40[0x40];
3024 3024
3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
3026 3026
3027 u8 reserved_2[0x600]; 3027 u8 reserved_at_280[0x600];
3028 3028
3029 u8 pas[0][0x40]; 3029 u8 pas[0][0x40];
3030}; 3030};
3031 3031
3032struct mlx5_ifc_query_xrc_srq_in_bits { 3032struct mlx5_ifc_query_xrc_srq_in_bits {
3033 u8 opcode[0x10]; 3033 u8 opcode[0x10];
3034 u8 reserved_0[0x10]; 3034 u8 reserved_at_10[0x10];
3035 3035
3036 u8 reserved_1[0x10]; 3036 u8 reserved_at_20[0x10];
3037 u8 op_mod[0x10]; 3037 u8 op_mod[0x10];
3038 3038
3039 u8 reserved_2[0x8]; 3039 u8 reserved_at_40[0x8];
3040 u8 xrc_srqn[0x18]; 3040 u8 xrc_srqn[0x18];
3041 3041
3042 u8 reserved_3[0x20]; 3042 u8 reserved_at_60[0x20];
3043}; 3043};
3044 3044
3045enum { 3045enum {
@@ -3049,13 +3049,13 @@ enum {
3049 3049
3050struct mlx5_ifc_query_vport_state_out_bits { 3050struct mlx5_ifc_query_vport_state_out_bits {
3051 u8 status[0x8]; 3051 u8 status[0x8];
3052 u8 reserved_0[0x18]; 3052 u8 reserved_at_8[0x18];
3053 3053
3054 u8 syndrome[0x20]; 3054 u8 syndrome[0x20];
3055 3055
3056 u8 reserved_1[0x20]; 3056 u8 reserved_at_40[0x20];
3057 3057
3058 u8 reserved_2[0x18]; 3058 u8 reserved_at_60[0x18];
3059 u8 admin_state[0x4]; 3059 u8 admin_state[0x4];
3060 u8 state[0x4]; 3060 u8 state[0x4];
3061}; 3061};
@@ -3067,25 +3067,25 @@ enum {
3067 3067
3068struct mlx5_ifc_query_vport_state_in_bits { 3068struct mlx5_ifc_query_vport_state_in_bits {
3069 u8 opcode[0x10]; 3069 u8 opcode[0x10];
3070 u8 reserved_0[0x10]; 3070 u8 reserved_at_10[0x10];
3071 3071
3072 u8 reserved_1[0x10]; 3072 u8 reserved_at_20[0x10];
3073 u8 op_mod[0x10]; 3073 u8 op_mod[0x10];
3074 3074
3075 u8 other_vport[0x1]; 3075 u8 other_vport[0x1];
3076 u8 reserved_2[0xf]; 3076 u8 reserved_at_41[0xf];
3077 u8 vport_number[0x10]; 3077 u8 vport_number[0x10];
3078 3078
3079 u8 reserved_3[0x20]; 3079 u8 reserved_at_60[0x20];
3080}; 3080};
3081 3081
3082struct mlx5_ifc_query_vport_counter_out_bits { 3082struct mlx5_ifc_query_vport_counter_out_bits {
3083 u8 status[0x8]; 3083 u8 status[0x8];
3084 u8 reserved_0[0x18]; 3084 u8 reserved_at_8[0x18];
3085 3085
3086 u8 syndrome[0x20]; 3086 u8 syndrome[0x20];
3087 3087
3088 u8 reserved_1[0x40]; 3088 u8 reserved_at_40[0x40];
3089 3089
3090 struct mlx5_ifc_traffic_counter_bits received_errors; 3090 struct mlx5_ifc_traffic_counter_bits received_errors;
3091 3091
@@ -3111,7 +3111,7 @@ struct mlx5_ifc_query_vport_counter_out_bits {
3111 3111
3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; 3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
3113 3113
3114 u8 reserved_2[0xa00]; 3114 u8 reserved_at_680[0xa00];
3115}; 3115};
3116 3116
3117enum { 3117enum {
@@ -3120,328 +3120,328 @@ enum {
3120 3120
3121struct mlx5_ifc_query_vport_counter_in_bits { 3121struct mlx5_ifc_query_vport_counter_in_bits {
3122 u8 opcode[0x10]; 3122 u8 opcode[0x10];
3123 u8 reserved_0[0x10]; 3123 u8 reserved_at_10[0x10];
3124 3124
3125 u8 reserved_1[0x10]; 3125 u8 reserved_at_20[0x10];
3126 u8 op_mod[0x10]; 3126 u8 op_mod[0x10];
3127 3127
3128 u8 other_vport[0x1]; 3128 u8 other_vport[0x1];
3129 u8 reserved_2[0xf]; 3129 u8 reserved_at_41[0xf];
3130 u8 vport_number[0x10]; 3130 u8 vport_number[0x10];
3131 3131
3132 u8 reserved_3[0x60]; 3132 u8 reserved_at_60[0x60];
3133 3133
3134 u8 clear[0x1]; 3134 u8 clear[0x1];
3135 u8 reserved_4[0x1f]; 3135 u8 reserved_at_c1[0x1f];
3136 3136
3137 u8 reserved_5[0x20]; 3137 u8 reserved_at_e0[0x20];
3138}; 3138};
3139 3139
3140struct mlx5_ifc_query_tis_out_bits { 3140struct mlx5_ifc_query_tis_out_bits {
3141 u8 status[0x8]; 3141 u8 status[0x8];
3142 u8 reserved_0[0x18]; 3142 u8 reserved_at_8[0x18];
3143 3143
3144 u8 syndrome[0x20]; 3144 u8 syndrome[0x20];
3145 3145
3146 u8 reserved_1[0x40]; 3146 u8 reserved_at_40[0x40];
3147 3147
3148 struct mlx5_ifc_tisc_bits tis_context; 3148 struct mlx5_ifc_tisc_bits tis_context;
3149}; 3149};
3150 3150
3151struct mlx5_ifc_query_tis_in_bits { 3151struct mlx5_ifc_query_tis_in_bits {
3152 u8 opcode[0x10]; 3152 u8 opcode[0x10];
3153 u8 reserved_0[0x10]; 3153 u8 reserved_at_10[0x10];
3154 3154
3155 u8 reserved_1[0x10]; 3155 u8 reserved_at_20[0x10];
3156 u8 op_mod[0x10]; 3156 u8 op_mod[0x10];
3157 3157
3158 u8 reserved_2[0x8]; 3158 u8 reserved_at_40[0x8];
3159 u8 tisn[0x18]; 3159 u8 tisn[0x18];
3160 3160
3161 u8 reserved_3[0x20]; 3161 u8 reserved_at_60[0x20];
3162}; 3162};
3163 3163
3164struct mlx5_ifc_query_tir_out_bits { 3164struct mlx5_ifc_query_tir_out_bits {
3165 u8 status[0x8]; 3165 u8 status[0x8];
3166 u8 reserved_0[0x18]; 3166 u8 reserved_at_8[0x18];
3167 3167
3168 u8 syndrome[0x20]; 3168 u8 syndrome[0x20];
3169 3169
3170 u8 reserved_1[0xc0]; 3170 u8 reserved_at_40[0xc0];
3171 3171
3172 struct mlx5_ifc_tirc_bits tir_context; 3172 struct mlx5_ifc_tirc_bits tir_context;
3173}; 3173};
3174 3174
3175struct mlx5_ifc_query_tir_in_bits { 3175struct mlx5_ifc_query_tir_in_bits {
3176 u8 opcode[0x10]; 3176 u8 opcode[0x10];
3177 u8 reserved_0[0x10]; 3177 u8 reserved_at_10[0x10];
3178 3178
3179 u8 reserved_1[0x10]; 3179 u8 reserved_at_20[0x10];
3180 u8 op_mod[0x10]; 3180 u8 op_mod[0x10];
3181 3181
3182 u8 reserved_2[0x8]; 3182 u8 reserved_at_40[0x8];
3183 u8 tirn[0x18]; 3183 u8 tirn[0x18];
3184 3184
3185 u8 reserved_3[0x20]; 3185 u8 reserved_at_60[0x20];
3186}; 3186};
3187 3187
3188struct mlx5_ifc_query_srq_out_bits { 3188struct mlx5_ifc_query_srq_out_bits {
3189 u8 status[0x8]; 3189 u8 status[0x8];
3190 u8 reserved_0[0x18]; 3190 u8 reserved_at_8[0x18];
3191 3191
3192 u8 syndrome[0x20]; 3192 u8 syndrome[0x20];
3193 3193
3194 u8 reserved_1[0x40]; 3194 u8 reserved_at_40[0x40];
3195 3195
3196 struct mlx5_ifc_srqc_bits srq_context_entry; 3196 struct mlx5_ifc_srqc_bits srq_context_entry;
3197 3197
3198 u8 reserved_2[0x600]; 3198 u8 reserved_at_280[0x600];
3199 3199
3200 u8 pas[0][0x40]; 3200 u8 pas[0][0x40];
3201}; 3201};
3202 3202
3203struct mlx5_ifc_query_srq_in_bits { 3203struct mlx5_ifc_query_srq_in_bits {
3204 u8 opcode[0x10]; 3204 u8 opcode[0x10];
3205 u8 reserved_0[0x10]; 3205 u8 reserved_at_10[0x10];
3206 3206
3207 u8 reserved_1[0x10]; 3207 u8 reserved_at_20[0x10];
3208 u8 op_mod[0x10]; 3208 u8 op_mod[0x10];
3209 3209
3210 u8 reserved_2[0x8]; 3210 u8 reserved_at_40[0x8];
3211 u8 srqn[0x18]; 3211 u8 srqn[0x18];
3212 3212
3213 u8 reserved_3[0x20]; 3213 u8 reserved_at_60[0x20];
3214}; 3214};
3215 3215
3216struct mlx5_ifc_query_sq_out_bits { 3216struct mlx5_ifc_query_sq_out_bits {
3217 u8 status[0x8]; 3217 u8 status[0x8];
3218 u8 reserved_0[0x18]; 3218 u8 reserved_at_8[0x18];
3219 3219
3220 u8 syndrome[0x20]; 3220 u8 syndrome[0x20];
3221 3221
3222 u8 reserved_1[0xc0]; 3222 u8 reserved_at_40[0xc0];
3223 3223
3224 struct mlx5_ifc_sqc_bits sq_context; 3224 struct mlx5_ifc_sqc_bits sq_context;
3225}; 3225};
3226 3226
3227struct mlx5_ifc_query_sq_in_bits { 3227struct mlx5_ifc_query_sq_in_bits {
3228 u8 opcode[0x10]; 3228 u8 opcode[0x10];
3229 u8 reserved_0[0x10]; 3229 u8 reserved_at_10[0x10];
3230 3230
3231 u8 reserved_1[0x10]; 3231 u8 reserved_at_20[0x10];
3232 u8 op_mod[0x10]; 3232 u8 op_mod[0x10];
3233 3233
3234 u8 reserved_2[0x8]; 3234 u8 reserved_at_40[0x8];
3235 u8 sqn[0x18]; 3235 u8 sqn[0x18];
3236 3236
3237 u8 reserved_3[0x20]; 3237 u8 reserved_at_60[0x20];
3238}; 3238};
3239 3239
3240struct mlx5_ifc_query_special_contexts_out_bits { 3240struct mlx5_ifc_query_special_contexts_out_bits {
3241 u8 status[0x8]; 3241 u8 status[0x8];
3242 u8 reserved_0[0x18]; 3242 u8 reserved_at_8[0x18];
3243 3243
3244 u8 syndrome[0x20]; 3244 u8 syndrome[0x20];
3245 3245
3246 u8 reserved_1[0x20]; 3246 u8 reserved_at_40[0x20];
3247 3247
3248 u8 resd_lkey[0x20]; 3248 u8 resd_lkey[0x20];
3249}; 3249};
3250 3250
3251struct mlx5_ifc_query_special_contexts_in_bits { 3251struct mlx5_ifc_query_special_contexts_in_bits {
3252 u8 opcode[0x10]; 3252 u8 opcode[0x10];
3253 u8 reserved_0[0x10]; 3253 u8 reserved_at_10[0x10];
3254 3254
3255 u8 reserved_1[0x10]; 3255 u8 reserved_at_20[0x10];
3256 u8 op_mod[0x10]; 3256 u8 op_mod[0x10];
3257 3257
3258 u8 reserved_2[0x40]; 3258 u8 reserved_at_40[0x40];
3259}; 3259};
3260 3260
3261struct mlx5_ifc_query_rqt_out_bits { 3261struct mlx5_ifc_query_rqt_out_bits {
3262 u8 status[0x8]; 3262 u8 status[0x8];
3263 u8 reserved_0[0x18]; 3263 u8 reserved_at_8[0x18];
3264 3264
3265 u8 syndrome[0x20]; 3265 u8 syndrome[0x20];
3266 3266
3267 u8 reserved_1[0xc0]; 3267 u8 reserved_at_40[0xc0];
3268 3268
3269 struct mlx5_ifc_rqtc_bits rqt_context; 3269 struct mlx5_ifc_rqtc_bits rqt_context;
3270}; 3270};
3271 3271
3272struct mlx5_ifc_query_rqt_in_bits { 3272struct mlx5_ifc_query_rqt_in_bits {
3273 u8 opcode[0x10]; 3273 u8 opcode[0x10];
3274 u8 reserved_0[0x10]; 3274 u8 reserved_at_10[0x10];
3275 3275
3276 u8 reserved_1[0x10]; 3276 u8 reserved_at_20[0x10];
3277 u8 op_mod[0x10]; 3277 u8 op_mod[0x10];
3278 3278
3279 u8 reserved_2[0x8]; 3279 u8 reserved_at_40[0x8];
3280 u8 rqtn[0x18]; 3280 u8 rqtn[0x18];
3281 3281
3282 u8 reserved_3[0x20]; 3282 u8 reserved_at_60[0x20];
3283}; 3283};
3284 3284
3285struct mlx5_ifc_query_rq_out_bits { 3285struct mlx5_ifc_query_rq_out_bits {
3286 u8 status[0x8]; 3286 u8 status[0x8];
3287 u8 reserved_0[0x18]; 3287 u8 reserved_at_8[0x18];
3288 3288
3289 u8 syndrome[0x20]; 3289 u8 syndrome[0x20];
3290 3290
3291 u8 reserved_1[0xc0]; 3291 u8 reserved_at_40[0xc0];
3292 3292
3293 struct mlx5_ifc_rqc_bits rq_context; 3293 struct mlx5_ifc_rqc_bits rq_context;
3294}; 3294};
3295 3295
3296struct mlx5_ifc_query_rq_in_bits { 3296struct mlx5_ifc_query_rq_in_bits {
3297 u8 opcode[0x10]; 3297 u8 opcode[0x10];
3298 u8 reserved_0[0x10]; 3298 u8 reserved_at_10[0x10];
3299 3299
3300 u8 reserved_1[0x10]; 3300 u8 reserved_at_20[0x10];
3301 u8 op_mod[0x10]; 3301 u8 op_mod[0x10];
3302 3302
3303 u8 reserved_2[0x8]; 3303 u8 reserved_at_40[0x8];
3304 u8 rqn[0x18]; 3304 u8 rqn[0x18];
3305 3305
3306 u8 reserved_3[0x20]; 3306 u8 reserved_at_60[0x20];
3307}; 3307};
3308 3308
3309struct mlx5_ifc_query_roce_address_out_bits { 3309struct mlx5_ifc_query_roce_address_out_bits {
3310 u8 status[0x8]; 3310 u8 status[0x8];
3311 u8 reserved_0[0x18]; 3311 u8 reserved_at_8[0x18];
3312 3312
3313 u8 syndrome[0x20]; 3313 u8 syndrome[0x20];
3314 3314
3315 u8 reserved_1[0x40]; 3315 u8 reserved_at_40[0x40];
3316 3316
3317 struct mlx5_ifc_roce_addr_layout_bits roce_address; 3317 struct mlx5_ifc_roce_addr_layout_bits roce_address;
3318}; 3318};
3319 3319
3320struct mlx5_ifc_query_roce_address_in_bits { 3320struct mlx5_ifc_query_roce_address_in_bits {
3321 u8 opcode[0x10]; 3321 u8 opcode[0x10];
3322 u8 reserved_0[0x10]; 3322 u8 reserved_at_10[0x10];
3323 3323
3324 u8 reserved_1[0x10]; 3324 u8 reserved_at_20[0x10];
3325 u8 op_mod[0x10]; 3325 u8 op_mod[0x10];
3326 3326
3327 u8 roce_address_index[0x10]; 3327 u8 roce_address_index[0x10];
3328 u8 reserved_2[0x10]; 3328 u8 reserved_at_50[0x10];
3329 3329
3330 u8 reserved_3[0x20]; 3330 u8 reserved_at_60[0x20];
3331}; 3331};
3332 3332
3333struct mlx5_ifc_query_rmp_out_bits { 3333struct mlx5_ifc_query_rmp_out_bits {
3334 u8 status[0x8]; 3334 u8 status[0x8];
3335 u8 reserved_0[0x18]; 3335 u8 reserved_at_8[0x18];
3336 3336
3337 u8 syndrome[0x20]; 3337 u8 syndrome[0x20];
3338 3338
3339 u8 reserved_1[0xc0]; 3339 u8 reserved_at_40[0xc0];
3340 3340
3341 struct mlx5_ifc_rmpc_bits rmp_context; 3341 struct mlx5_ifc_rmpc_bits rmp_context;
3342}; 3342};
3343 3343
3344struct mlx5_ifc_query_rmp_in_bits { 3344struct mlx5_ifc_query_rmp_in_bits {
3345 u8 opcode[0x10]; 3345 u8 opcode[0x10];
3346 u8 reserved_0[0x10]; 3346 u8 reserved_at_10[0x10];
3347 3347
3348 u8 reserved_1[0x10]; 3348 u8 reserved_at_20[0x10];
3349 u8 op_mod[0x10]; 3349 u8 op_mod[0x10];
3350 3350
3351 u8 reserved_2[0x8]; 3351 u8 reserved_at_40[0x8];
3352 u8 rmpn[0x18]; 3352 u8 rmpn[0x18];
3353 3353
3354 u8 reserved_3[0x20]; 3354 u8 reserved_at_60[0x20];
3355}; 3355};
3356 3356
3357struct mlx5_ifc_query_qp_out_bits { 3357struct mlx5_ifc_query_qp_out_bits {
3358 u8 status[0x8]; 3358 u8 status[0x8];
3359 u8 reserved_0[0x18]; 3359 u8 reserved_at_8[0x18];
3360 3360
3361 u8 syndrome[0x20]; 3361 u8 syndrome[0x20];
3362 3362
3363 u8 reserved_1[0x40]; 3363 u8 reserved_at_40[0x40];
3364 3364
3365 u8 opt_param_mask[0x20]; 3365 u8 opt_param_mask[0x20];
3366 3366
3367 u8 reserved_2[0x20]; 3367 u8 reserved_at_a0[0x20];
3368 3368
3369 struct mlx5_ifc_qpc_bits qpc; 3369 struct mlx5_ifc_qpc_bits qpc;
3370 3370
3371 u8 reserved_3[0x80]; 3371 u8 reserved_at_800[0x80];
3372 3372
3373 u8 pas[0][0x40]; 3373 u8 pas[0][0x40];
3374}; 3374};
3375 3375
3376struct mlx5_ifc_query_qp_in_bits { 3376struct mlx5_ifc_query_qp_in_bits {
3377 u8 opcode[0x10]; 3377 u8 opcode[0x10];
3378 u8 reserved_0[0x10]; 3378 u8 reserved_at_10[0x10];
3379 3379
3380 u8 reserved_1[0x10]; 3380 u8 reserved_at_20[0x10];
3381 u8 op_mod[0x10]; 3381 u8 op_mod[0x10];
3382 3382
3383 u8 reserved_2[0x8]; 3383 u8 reserved_at_40[0x8];
3384 u8 qpn[0x18]; 3384 u8 qpn[0x18];
3385 3385
3386 u8 reserved_3[0x20]; 3386 u8 reserved_at_60[0x20];
3387}; 3387};
3388 3388
3389struct mlx5_ifc_query_q_counter_out_bits { 3389struct mlx5_ifc_query_q_counter_out_bits {
3390 u8 status[0x8]; 3390 u8 status[0x8];
3391 u8 reserved_0[0x18]; 3391 u8 reserved_at_8[0x18];
3392 3392
3393 u8 syndrome[0x20]; 3393 u8 syndrome[0x20];
3394 3394
3395 u8 reserved_1[0x40]; 3395 u8 reserved_at_40[0x40];
3396 3396
3397 u8 rx_write_requests[0x20]; 3397 u8 rx_write_requests[0x20];
3398 3398
3399 u8 reserved_2[0x20]; 3399 u8 reserved_at_a0[0x20];
3400 3400
3401 u8 rx_read_requests[0x20]; 3401 u8 rx_read_requests[0x20];
3402 3402
3403 u8 reserved_3[0x20]; 3403 u8 reserved_at_e0[0x20];
3404 3404
3405 u8 rx_atomic_requests[0x20]; 3405 u8 rx_atomic_requests[0x20];
3406 3406
3407 u8 reserved_4[0x20]; 3407 u8 reserved_at_120[0x20];
3408 3408
3409 u8 rx_dct_connect[0x20]; 3409 u8 rx_dct_connect[0x20];
3410 3410
3411 u8 reserved_5[0x20]; 3411 u8 reserved_at_160[0x20];
3412 3412
3413 u8 out_of_buffer[0x20]; 3413 u8 out_of_buffer[0x20];
3414 3414
3415 u8 reserved_6[0x20]; 3415 u8 reserved_at_1a0[0x20];
3416 3416
3417 u8 out_of_sequence[0x20]; 3417 u8 out_of_sequence[0x20];
3418 3418
3419 u8 reserved_7[0x620]; 3419 u8 reserved_at_1e0[0x620];
3420}; 3420};
3421 3421
3422struct mlx5_ifc_query_q_counter_in_bits { 3422struct mlx5_ifc_query_q_counter_in_bits {
3423 u8 opcode[0x10]; 3423 u8 opcode[0x10];
3424 u8 reserved_0[0x10]; 3424 u8 reserved_at_10[0x10];
3425 3425
3426 u8 reserved_1[0x10]; 3426 u8 reserved_at_20[0x10];
3427 u8 op_mod[0x10]; 3427 u8 op_mod[0x10];
3428 3428
3429 u8 reserved_2[0x80]; 3429 u8 reserved_at_40[0x80];
3430 3430
3431 u8 clear[0x1]; 3431 u8 clear[0x1];
3432 u8 reserved_3[0x1f]; 3432 u8 reserved_at_c1[0x1f];
3433 3433
3434 u8 reserved_4[0x18]; 3434 u8 reserved_at_e0[0x18];
3435 u8 counter_set_id[0x8]; 3435 u8 counter_set_id[0x8];
3436}; 3436};
3437 3437
3438struct mlx5_ifc_query_pages_out_bits { 3438struct mlx5_ifc_query_pages_out_bits {
3439 u8 status[0x8]; 3439 u8 status[0x8];
3440 u8 reserved_0[0x18]; 3440 u8 reserved_at_8[0x18];
3441 3441
3442 u8 syndrome[0x20]; 3442 u8 syndrome[0x20];
3443 3443
3444 u8 reserved_1[0x10]; 3444 u8 reserved_at_40[0x10];
3445 u8 function_id[0x10]; 3445 u8 function_id[0x10];
3446 3446
3447 u8 num_pages[0x20]; 3447 u8 num_pages[0x20];
@@ -3455,55 +3455,55 @@ enum {
3455 3455
3456struct mlx5_ifc_query_pages_in_bits { 3456struct mlx5_ifc_query_pages_in_bits {
3457 u8 opcode[0x10]; 3457 u8 opcode[0x10];
3458 u8 reserved_0[0x10]; 3458 u8 reserved_at_10[0x10];
3459 3459
3460 u8 reserved_1[0x10]; 3460 u8 reserved_at_20[0x10];
3461 u8 op_mod[0x10]; 3461 u8 op_mod[0x10];
3462 3462
3463 u8 reserved_2[0x10]; 3463 u8 reserved_at_40[0x10];
3464 u8 function_id[0x10]; 3464 u8 function_id[0x10];
3465 3465
3466 u8 reserved_3[0x20]; 3466 u8 reserved_at_60[0x20];
3467}; 3467};
3468 3468
3469struct mlx5_ifc_query_nic_vport_context_out_bits { 3469struct mlx5_ifc_query_nic_vport_context_out_bits {
3470 u8 status[0x8]; 3470 u8 status[0x8];
3471 u8 reserved_0[0x18]; 3471 u8 reserved_at_8[0x18];
3472 3472
3473 u8 syndrome[0x20]; 3473 u8 syndrome[0x20];
3474 3474
3475 u8 reserved_1[0x40]; 3475 u8 reserved_at_40[0x40];
3476 3476
3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
3478}; 3478};
3479 3479
3480struct mlx5_ifc_query_nic_vport_context_in_bits { 3480struct mlx5_ifc_query_nic_vport_context_in_bits {
3481 u8 opcode[0x10]; 3481 u8 opcode[0x10];
3482 u8 reserved_0[0x10]; 3482 u8 reserved_at_10[0x10];
3483 3483
3484 u8 reserved_1[0x10]; 3484 u8 reserved_at_20[0x10];
3485 u8 op_mod[0x10]; 3485 u8 op_mod[0x10];
3486 3486
3487 u8 other_vport[0x1]; 3487 u8 other_vport[0x1];
3488 u8 reserved_2[0xf]; 3488 u8 reserved_at_41[0xf];
3489 u8 vport_number[0x10]; 3489 u8 vport_number[0x10];
3490 3490
3491 u8 reserved_3[0x5]; 3491 u8 reserved_at_60[0x5];
3492 u8 allowed_list_type[0x3]; 3492 u8 allowed_list_type[0x3];
3493 u8 reserved_4[0x18]; 3493 u8 reserved_at_68[0x18];
3494}; 3494};
3495 3495
3496struct mlx5_ifc_query_mkey_out_bits { 3496struct mlx5_ifc_query_mkey_out_bits {
3497 u8 status[0x8]; 3497 u8 status[0x8];
3498 u8 reserved_0[0x18]; 3498 u8 reserved_at_8[0x18];
3499 3499
3500 u8 syndrome[0x20]; 3500 u8 syndrome[0x20];
3501 3501
3502 u8 reserved_1[0x40]; 3502 u8 reserved_at_40[0x40];
3503 3503
3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
3505 3505
3506 u8 reserved_2[0x600]; 3506 u8 reserved_at_280[0x600];
3507 3507
3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8]; 3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8];
3509 3509
@@ -3512,265 +3512,265 @@ struct mlx5_ifc_query_mkey_out_bits {
3512 3512
3513struct mlx5_ifc_query_mkey_in_bits { 3513struct mlx5_ifc_query_mkey_in_bits {
3514 u8 opcode[0x10]; 3514 u8 opcode[0x10];
3515 u8 reserved_0[0x10]; 3515 u8 reserved_at_10[0x10];
3516 3516
3517 u8 reserved_1[0x10]; 3517 u8 reserved_at_20[0x10];
3518 u8 op_mod[0x10]; 3518 u8 op_mod[0x10];
3519 3519
3520 u8 reserved_2[0x8]; 3520 u8 reserved_at_40[0x8];
3521 u8 mkey_index[0x18]; 3521 u8 mkey_index[0x18];
3522 3522
3523 u8 pg_access[0x1]; 3523 u8 pg_access[0x1];
3524 u8 reserved_3[0x1f]; 3524 u8 reserved_at_61[0x1f];
3525}; 3525};
3526 3526
3527struct mlx5_ifc_query_mad_demux_out_bits { 3527struct mlx5_ifc_query_mad_demux_out_bits {
3528 u8 status[0x8]; 3528 u8 status[0x8];
3529 u8 reserved_0[0x18]; 3529 u8 reserved_at_8[0x18];
3530 3530
3531 u8 syndrome[0x20]; 3531 u8 syndrome[0x20];
3532 3532
3533 u8 reserved_1[0x40]; 3533 u8 reserved_at_40[0x40];
3534 3534
3535 u8 mad_dumux_parameters_block[0x20]; 3535 u8 mad_dumux_parameters_block[0x20];
3536}; 3536};
3537 3537
3538struct mlx5_ifc_query_mad_demux_in_bits { 3538struct mlx5_ifc_query_mad_demux_in_bits {
3539 u8 opcode[0x10]; 3539 u8 opcode[0x10];
3540 u8 reserved_0[0x10]; 3540 u8 reserved_at_10[0x10];
3541 3541
3542 u8 reserved_1[0x10]; 3542 u8 reserved_at_20[0x10];
3543 u8 op_mod[0x10]; 3543 u8 op_mod[0x10];
3544 3544
3545 u8 reserved_2[0x40]; 3545 u8 reserved_at_40[0x40];
3546}; 3546};
3547 3547
3548struct mlx5_ifc_query_l2_table_entry_out_bits { 3548struct mlx5_ifc_query_l2_table_entry_out_bits {
3549 u8 status[0x8]; 3549 u8 status[0x8];
3550 u8 reserved_0[0x18]; 3550 u8 reserved_at_8[0x18];
3551 3551
3552 u8 syndrome[0x20]; 3552 u8 syndrome[0x20];
3553 3553
3554 u8 reserved_1[0xa0]; 3554 u8 reserved_at_40[0xa0];
3555 3555
3556 u8 reserved_2[0x13]; 3556 u8 reserved_at_e0[0x13];
3557 u8 vlan_valid[0x1]; 3557 u8 vlan_valid[0x1];
3558 u8 vlan[0xc]; 3558 u8 vlan[0xc];
3559 3559
3560 struct mlx5_ifc_mac_address_layout_bits mac_address; 3560 struct mlx5_ifc_mac_address_layout_bits mac_address;
3561 3561
3562 u8 reserved_3[0xc0]; 3562 u8 reserved_at_140[0xc0];
3563}; 3563};
3564 3564
3565struct mlx5_ifc_query_l2_table_entry_in_bits { 3565struct mlx5_ifc_query_l2_table_entry_in_bits {
3566 u8 opcode[0x10]; 3566 u8 opcode[0x10];
3567 u8 reserved_0[0x10]; 3567 u8 reserved_at_10[0x10];
3568 3568
3569 u8 reserved_1[0x10]; 3569 u8 reserved_at_20[0x10];
3570 u8 op_mod[0x10]; 3570 u8 op_mod[0x10];
3571 3571
3572 u8 reserved_2[0x60]; 3572 u8 reserved_at_40[0x60];
3573 3573
3574 u8 reserved_3[0x8]; 3574 u8 reserved_at_a0[0x8];
3575 u8 table_index[0x18]; 3575 u8 table_index[0x18];
3576 3576
3577 u8 reserved_4[0x140]; 3577 u8 reserved_at_c0[0x140];
3578}; 3578};
3579 3579
3580struct mlx5_ifc_query_issi_out_bits { 3580struct mlx5_ifc_query_issi_out_bits {
3581 u8 status[0x8]; 3581 u8 status[0x8];
3582 u8 reserved_0[0x18]; 3582 u8 reserved_at_8[0x18];
3583 3583
3584 u8 syndrome[0x20]; 3584 u8 syndrome[0x20];
3585 3585
3586 u8 reserved_1[0x10]; 3586 u8 reserved_at_40[0x10];
3587 u8 current_issi[0x10]; 3587 u8 current_issi[0x10];
3588 3588
3589 u8 reserved_2[0xa0]; 3589 u8 reserved_at_60[0xa0];
3590 3590
3591 u8 supported_issi_reserved[76][0x8]; 3591 u8 reserved_at_100[76][0x8];
3592 u8 supported_issi_dw0[0x20]; 3592 u8 supported_issi_dw0[0x20];
3593}; 3593};
3594 3594
3595struct mlx5_ifc_query_issi_in_bits { 3595struct mlx5_ifc_query_issi_in_bits {
3596 u8 opcode[0x10]; 3596 u8 opcode[0x10];
3597 u8 reserved_0[0x10]; 3597 u8 reserved_at_10[0x10];
3598 3598
3599 u8 reserved_1[0x10]; 3599 u8 reserved_at_20[0x10];
3600 u8 op_mod[0x10]; 3600 u8 op_mod[0x10];
3601 3601
3602 u8 reserved_2[0x40]; 3602 u8 reserved_at_40[0x40];
3603}; 3603};
3604 3604
3605struct mlx5_ifc_query_hca_vport_pkey_out_bits { 3605struct mlx5_ifc_query_hca_vport_pkey_out_bits {
3606 u8 status[0x8]; 3606 u8 status[0x8];
3607 u8 reserved_0[0x18]; 3607 u8 reserved_at_8[0x18];
3608 3608
3609 u8 syndrome[0x20]; 3609 u8 syndrome[0x20];
3610 3610
3611 u8 reserved_1[0x40]; 3611 u8 reserved_at_40[0x40];
3612 3612
3613 struct mlx5_ifc_pkey_bits pkey[0]; 3613 struct mlx5_ifc_pkey_bits pkey[0];
3614}; 3614};
3615 3615
3616struct mlx5_ifc_query_hca_vport_pkey_in_bits { 3616struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3617 u8 opcode[0x10]; 3617 u8 opcode[0x10];
3618 u8 reserved_0[0x10]; 3618 u8 reserved_at_10[0x10];
3619 3619
3620 u8 reserved_1[0x10]; 3620 u8 reserved_at_20[0x10];
3621 u8 op_mod[0x10]; 3621 u8 op_mod[0x10];
3622 3622
3623 u8 other_vport[0x1]; 3623 u8 other_vport[0x1];
3624 u8 reserved_2[0xb]; 3624 u8 reserved_at_41[0xb];
3625 u8 port_num[0x4]; 3625 u8 port_num[0x4];
3626 u8 vport_number[0x10]; 3626 u8 vport_number[0x10];
3627 3627
3628 u8 reserved_3[0x10]; 3628 u8 reserved_at_60[0x10];
3629 u8 pkey_index[0x10]; 3629 u8 pkey_index[0x10];
3630}; 3630};
3631 3631
3632struct mlx5_ifc_query_hca_vport_gid_out_bits { 3632struct mlx5_ifc_query_hca_vport_gid_out_bits {
3633 u8 status[0x8]; 3633 u8 status[0x8];
3634 u8 reserved_0[0x18]; 3634 u8 reserved_at_8[0x18];
3635 3635
3636 u8 syndrome[0x20]; 3636 u8 syndrome[0x20];
3637 3637
3638 u8 reserved_1[0x20]; 3638 u8 reserved_at_40[0x20];
3639 3639
3640 u8 gids_num[0x10]; 3640 u8 gids_num[0x10];
3641 u8 reserved_2[0x10]; 3641 u8 reserved_at_70[0x10];
3642 3642
3643 struct mlx5_ifc_array128_auto_bits gid[0]; 3643 struct mlx5_ifc_array128_auto_bits gid[0];
3644}; 3644};
3645 3645
3646struct mlx5_ifc_query_hca_vport_gid_in_bits { 3646struct mlx5_ifc_query_hca_vport_gid_in_bits {
3647 u8 opcode[0x10]; 3647 u8 opcode[0x10];
3648 u8 reserved_0[0x10]; 3648 u8 reserved_at_10[0x10];
3649 3649
3650 u8 reserved_1[0x10]; 3650 u8 reserved_at_20[0x10];
3651 u8 op_mod[0x10]; 3651 u8 op_mod[0x10];
3652 3652
3653 u8 other_vport[0x1]; 3653 u8 other_vport[0x1];
3654 u8 reserved_2[0xb]; 3654 u8 reserved_at_41[0xb];
3655 u8 port_num[0x4]; 3655 u8 port_num[0x4];
3656 u8 vport_number[0x10]; 3656 u8 vport_number[0x10];
3657 3657
3658 u8 reserved_3[0x10]; 3658 u8 reserved_at_60[0x10];
3659 u8 gid_index[0x10]; 3659 u8 gid_index[0x10];
3660}; 3660};
3661 3661
3662struct mlx5_ifc_query_hca_vport_context_out_bits { 3662struct mlx5_ifc_query_hca_vport_context_out_bits {
3663 u8 status[0x8]; 3663 u8 status[0x8];
3664 u8 reserved_0[0x18]; 3664 u8 reserved_at_8[0x18];
3665 3665
3666 u8 syndrome[0x20]; 3666 u8 syndrome[0x20];
3667 3667
3668 u8 reserved_1[0x40]; 3668 u8 reserved_at_40[0x40];
3669 3669
3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
3671}; 3671};
3672 3672
3673struct mlx5_ifc_query_hca_vport_context_in_bits { 3673struct mlx5_ifc_query_hca_vport_context_in_bits {
3674 u8 opcode[0x10]; 3674 u8 opcode[0x10];
3675 u8 reserved_0[0x10]; 3675 u8 reserved_at_10[0x10];
3676 3676
3677 u8 reserved_1[0x10]; 3677 u8 reserved_at_20[0x10];
3678 u8 op_mod[0x10]; 3678 u8 op_mod[0x10];
3679 3679
3680 u8 other_vport[0x1]; 3680 u8 other_vport[0x1];
3681 u8 reserved_2[0xb]; 3681 u8 reserved_at_41[0xb];
3682 u8 port_num[0x4]; 3682 u8 port_num[0x4];
3683 u8 vport_number[0x10]; 3683 u8 vport_number[0x10];
3684 3684
3685 u8 reserved_3[0x20]; 3685 u8 reserved_at_60[0x20];
3686}; 3686};
3687 3687
3688struct mlx5_ifc_query_hca_cap_out_bits { 3688struct mlx5_ifc_query_hca_cap_out_bits {
3689 u8 status[0x8]; 3689 u8 status[0x8];
3690 u8 reserved_0[0x18]; 3690 u8 reserved_at_8[0x18];
3691 3691
3692 u8 syndrome[0x20]; 3692 u8 syndrome[0x20];
3693 3693
3694 u8 reserved_1[0x40]; 3694 u8 reserved_at_40[0x40];
3695 3695
3696 union mlx5_ifc_hca_cap_union_bits capability; 3696 union mlx5_ifc_hca_cap_union_bits capability;
3697}; 3697};
3698 3698
3699struct mlx5_ifc_query_hca_cap_in_bits { 3699struct mlx5_ifc_query_hca_cap_in_bits {
3700 u8 opcode[0x10]; 3700 u8 opcode[0x10];
3701 u8 reserved_0[0x10]; 3701 u8 reserved_at_10[0x10];
3702 3702
3703 u8 reserved_1[0x10]; 3703 u8 reserved_at_20[0x10];
3704 u8 op_mod[0x10]; 3704 u8 op_mod[0x10];
3705 3705
3706 u8 reserved_2[0x40]; 3706 u8 reserved_at_40[0x40];
3707}; 3707};
3708 3708
3709struct mlx5_ifc_query_flow_table_out_bits { 3709struct mlx5_ifc_query_flow_table_out_bits {
3710 u8 status[0x8]; 3710 u8 status[0x8];
3711 u8 reserved_0[0x18]; 3711 u8 reserved_at_8[0x18];
3712 3712
3713 u8 syndrome[0x20]; 3713 u8 syndrome[0x20];
3714 3714
3715 u8 reserved_1[0x80]; 3715 u8 reserved_at_40[0x80];
3716 3716
3717 u8 reserved_2[0x8]; 3717 u8 reserved_at_c0[0x8];
3718 u8 level[0x8]; 3718 u8 level[0x8];
3719 u8 reserved_3[0x8]; 3719 u8 reserved_at_d0[0x8];
3720 u8 log_size[0x8]; 3720 u8 log_size[0x8];
3721 3721
3722 u8 reserved_4[0x120]; 3722 u8 reserved_at_e0[0x120];
3723}; 3723};
3724 3724
3725struct mlx5_ifc_query_flow_table_in_bits { 3725struct mlx5_ifc_query_flow_table_in_bits {
3726 u8 opcode[0x10]; 3726 u8 opcode[0x10];
3727 u8 reserved_0[0x10]; 3727 u8 reserved_at_10[0x10];
3728 3728
3729 u8 reserved_1[0x10]; 3729 u8 reserved_at_20[0x10];
3730 u8 op_mod[0x10]; 3730 u8 op_mod[0x10];
3731 3731
3732 u8 reserved_2[0x40]; 3732 u8 reserved_at_40[0x40];
3733 3733
3734 u8 table_type[0x8]; 3734 u8 table_type[0x8];
3735 u8 reserved_3[0x18]; 3735 u8 reserved_at_88[0x18];
3736 3736
3737 u8 reserved_4[0x8]; 3737 u8 reserved_at_a0[0x8];
3738 u8 table_id[0x18]; 3738 u8 table_id[0x18];
3739 3739
3740 u8 reserved_5[0x140]; 3740 u8 reserved_at_c0[0x140];
3741}; 3741};
3742 3742
3743struct mlx5_ifc_query_fte_out_bits { 3743struct mlx5_ifc_query_fte_out_bits {
3744 u8 status[0x8]; 3744 u8 status[0x8];
3745 u8 reserved_0[0x18]; 3745 u8 reserved_at_8[0x18];
3746 3746
3747 u8 syndrome[0x20]; 3747 u8 syndrome[0x20];
3748 3748
3749 u8 reserved_1[0x1c0]; 3749 u8 reserved_at_40[0x1c0];
3750 3750
3751 struct mlx5_ifc_flow_context_bits flow_context; 3751 struct mlx5_ifc_flow_context_bits flow_context;
3752}; 3752};
3753 3753
3754struct mlx5_ifc_query_fte_in_bits { 3754struct mlx5_ifc_query_fte_in_bits {
3755 u8 opcode[0x10]; 3755 u8 opcode[0x10];
3756 u8 reserved_0[0x10]; 3756 u8 reserved_at_10[0x10];
3757 3757
3758 u8 reserved_1[0x10]; 3758 u8 reserved_at_20[0x10];
3759 u8 op_mod[0x10]; 3759 u8 op_mod[0x10];
3760 3760
3761 u8 reserved_2[0x40]; 3761 u8 reserved_at_40[0x40];
3762 3762
3763 u8 table_type[0x8]; 3763 u8 table_type[0x8];
3764 u8 reserved_3[0x18]; 3764 u8 reserved_at_88[0x18];
3765 3765
3766 u8 reserved_4[0x8]; 3766 u8 reserved_at_a0[0x8];
3767 u8 table_id[0x18]; 3767 u8 table_id[0x18];
3768 3768
3769 u8 reserved_5[0x40]; 3769 u8 reserved_at_c0[0x40];
3770 3770
3771 u8 flow_index[0x20]; 3771 u8 flow_index[0x20];
3772 3772
3773 u8 reserved_6[0xe0]; 3773 u8 reserved_at_120[0xe0];
3774}; 3774};
3775 3775
3776enum { 3776enum {
@@ -3781,84 +3781,84 @@ enum {
3781 3781
3782struct mlx5_ifc_query_flow_group_out_bits { 3782struct mlx5_ifc_query_flow_group_out_bits {
3783 u8 status[0x8]; 3783 u8 status[0x8];
3784 u8 reserved_0[0x18]; 3784 u8 reserved_at_8[0x18];
3785 3785
3786 u8 syndrome[0x20]; 3786 u8 syndrome[0x20];
3787 3787
3788 u8 reserved_1[0xa0]; 3788 u8 reserved_at_40[0xa0];
3789 3789
3790 u8 start_flow_index[0x20]; 3790 u8 start_flow_index[0x20];
3791 3791
3792 u8 reserved_2[0x20]; 3792 u8 reserved_at_100[0x20];
3793 3793
3794 u8 end_flow_index[0x20]; 3794 u8 end_flow_index[0x20];
3795 3795
3796 u8 reserved_3[0xa0]; 3796 u8 reserved_at_140[0xa0];
3797 3797
3798 u8 reserved_4[0x18]; 3798 u8 reserved_at_1e0[0x18];
3799 u8 match_criteria_enable[0x8]; 3799 u8 match_criteria_enable[0x8];
3800 3800
3801 struct mlx5_ifc_fte_match_param_bits match_criteria; 3801 struct mlx5_ifc_fte_match_param_bits match_criteria;
3802 3802
3803 u8 reserved_5[0xe00]; 3803 u8 reserved_at_1200[0xe00];
3804}; 3804};
3805 3805
3806struct mlx5_ifc_query_flow_group_in_bits { 3806struct mlx5_ifc_query_flow_group_in_bits {
3807 u8 opcode[0x10]; 3807 u8 opcode[0x10];
3808 u8 reserved_0[0x10]; 3808 u8 reserved_at_10[0x10];
3809 3809
3810 u8 reserved_1[0x10]; 3810 u8 reserved_at_20[0x10];
3811 u8 op_mod[0x10]; 3811 u8 op_mod[0x10];
3812 3812
3813 u8 reserved_2[0x40]; 3813 u8 reserved_at_40[0x40];
3814 3814
3815 u8 table_type[0x8]; 3815 u8 table_type[0x8];
3816 u8 reserved_3[0x18]; 3816 u8 reserved_at_88[0x18];
3817 3817
3818 u8 reserved_4[0x8]; 3818 u8 reserved_at_a0[0x8];
3819 u8 table_id[0x18]; 3819 u8 table_id[0x18];
3820 3820
3821 u8 group_id[0x20]; 3821 u8 group_id[0x20];
3822 3822
3823 u8 reserved_5[0x120]; 3823 u8 reserved_at_e0[0x120];
3824}; 3824};
3825 3825
3826struct mlx5_ifc_query_esw_vport_context_out_bits { 3826struct mlx5_ifc_query_esw_vport_context_out_bits {
3827 u8 status[0x8]; 3827 u8 status[0x8];
3828 u8 reserved_0[0x18]; 3828 u8 reserved_at_8[0x18];
3829 3829
3830 u8 syndrome[0x20]; 3830 u8 syndrome[0x20];
3831 3831
3832 u8 reserved_1[0x40]; 3832 u8 reserved_at_40[0x40];
3833 3833
3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context; 3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
3835}; 3835};
3836 3836
3837struct mlx5_ifc_query_esw_vport_context_in_bits { 3837struct mlx5_ifc_query_esw_vport_context_in_bits {
3838 u8 opcode[0x10]; 3838 u8 opcode[0x10];
3839 u8 reserved_0[0x10]; 3839 u8 reserved_at_10[0x10];
3840 3840
3841 u8 reserved_1[0x10]; 3841 u8 reserved_at_20[0x10];
3842 u8 op_mod[0x10]; 3842 u8 op_mod[0x10];
3843 3843
3844 u8 other_vport[0x1]; 3844 u8 other_vport[0x1];
3845 u8 reserved_2[0xf]; 3845 u8 reserved_at_41[0xf];
3846 u8 vport_number[0x10]; 3846 u8 vport_number[0x10];
3847 3847
3848 u8 reserved_3[0x20]; 3848 u8 reserved_at_60[0x20];
3849}; 3849};
3850 3850
3851struct mlx5_ifc_modify_esw_vport_context_out_bits { 3851struct mlx5_ifc_modify_esw_vport_context_out_bits {
3852 u8 status[0x8]; 3852 u8 status[0x8];
3853 u8 reserved_0[0x18]; 3853 u8 reserved_at_8[0x18];
3854 3854
3855 u8 syndrome[0x20]; 3855 u8 syndrome[0x20];
3856 3856
3857 u8 reserved_1[0x40]; 3857 u8 reserved_at_40[0x40];
3858}; 3858};
3859 3859
3860struct mlx5_ifc_esw_vport_context_fields_select_bits { 3860struct mlx5_ifc_esw_vport_context_fields_select_bits {
3861 u8 reserved[0x1c]; 3861 u8 reserved_at_0[0x1c];
3862 u8 vport_cvlan_insert[0x1]; 3862 u8 vport_cvlan_insert[0x1];
3863 u8 vport_svlan_insert[0x1]; 3863 u8 vport_svlan_insert[0x1];
3864 u8 vport_cvlan_strip[0x1]; 3864 u8 vport_cvlan_strip[0x1];
@@ -3867,13 +3867,13 @@ struct mlx5_ifc_esw_vport_context_fields_select_bits {
3867 3867
3868struct mlx5_ifc_modify_esw_vport_context_in_bits { 3868struct mlx5_ifc_modify_esw_vport_context_in_bits {
3869 u8 opcode[0x10]; 3869 u8 opcode[0x10];
3870 u8 reserved_0[0x10]; 3870 u8 reserved_at_10[0x10];
3871 3871
3872 u8 reserved_1[0x10]; 3872 u8 reserved_at_20[0x10];
3873 u8 op_mod[0x10]; 3873 u8 op_mod[0x10];
3874 3874
3875 u8 other_vport[0x1]; 3875 u8 other_vport[0x1];
3876 u8 reserved_2[0xf]; 3876 u8 reserved_at_41[0xf];
3877 u8 vport_number[0x10]; 3877 u8 vport_number[0x10];
3878 3878
3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; 3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
@@ -3883,124 +3883,124 @@ struct mlx5_ifc_modify_esw_vport_context_in_bits {
3883 3883
3884struct mlx5_ifc_query_eq_out_bits { 3884struct mlx5_ifc_query_eq_out_bits {
3885 u8 status[0x8]; 3885 u8 status[0x8];
3886 u8 reserved_0[0x18]; 3886 u8 reserved_at_8[0x18];
3887 3887
3888 u8 syndrome[0x20]; 3888 u8 syndrome[0x20];
3889 3889
3890 u8 reserved_1[0x40]; 3890 u8 reserved_at_40[0x40];
3891 3891
3892 struct mlx5_ifc_eqc_bits eq_context_entry; 3892 struct mlx5_ifc_eqc_bits eq_context_entry;
3893 3893
3894 u8 reserved_2[0x40]; 3894 u8 reserved_at_280[0x40];
3895 3895
3896 u8 event_bitmask[0x40]; 3896 u8 event_bitmask[0x40];
3897 3897
3898 u8 reserved_3[0x580]; 3898 u8 reserved_at_300[0x580];
3899 3899
3900 u8 pas[0][0x40]; 3900 u8 pas[0][0x40];
3901}; 3901};
3902 3902
3903struct mlx5_ifc_query_eq_in_bits { 3903struct mlx5_ifc_query_eq_in_bits {
3904 u8 opcode[0x10]; 3904 u8 opcode[0x10];
3905 u8 reserved_0[0x10]; 3905 u8 reserved_at_10[0x10];
3906 3906
3907 u8 reserved_1[0x10]; 3907 u8 reserved_at_20[0x10];
3908 u8 op_mod[0x10]; 3908 u8 op_mod[0x10];
3909 3909
3910 u8 reserved_2[0x18]; 3910 u8 reserved_at_40[0x18];
3911 u8 eq_number[0x8]; 3911 u8 eq_number[0x8];
3912 3912
3913 u8 reserved_3[0x20]; 3913 u8 reserved_at_60[0x20];
3914}; 3914};
3915 3915
3916struct mlx5_ifc_query_dct_out_bits { 3916struct mlx5_ifc_query_dct_out_bits {
3917 u8 status[0x8]; 3917 u8 status[0x8];
3918 u8 reserved_0[0x18]; 3918 u8 reserved_at_8[0x18];
3919 3919
3920 u8 syndrome[0x20]; 3920 u8 syndrome[0x20];
3921 3921
3922 u8 reserved_1[0x40]; 3922 u8 reserved_at_40[0x40];
3923 3923
3924 struct mlx5_ifc_dctc_bits dct_context_entry; 3924 struct mlx5_ifc_dctc_bits dct_context_entry;
3925 3925
3926 u8 reserved_2[0x180]; 3926 u8 reserved_at_280[0x180];
3927}; 3927};
3928 3928
3929struct mlx5_ifc_query_dct_in_bits { 3929struct mlx5_ifc_query_dct_in_bits {
3930 u8 opcode[0x10]; 3930 u8 opcode[0x10];
3931 u8 reserved_0[0x10]; 3931 u8 reserved_at_10[0x10];
3932 3932
3933 u8 reserved_1[0x10]; 3933 u8 reserved_at_20[0x10];
3934 u8 op_mod[0x10]; 3934 u8 op_mod[0x10];
3935 3935
3936 u8 reserved_2[0x8]; 3936 u8 reserved_at_40[0x8];
3937 u8 dctn[0x18]; 3937 u8 dctn[0x18];
3938 3938
3939 u8 reserved_3[0x20]; 3939 u8 reserved_at_60[0x20];
3940}; 3940};
3941 3941
3942struct mlx5_ifc_query_cq_out_bits { 3942struct mlx5_ifc_query_cq_out_bits {
3943 u8 status[0x8]; 3943 u8 status[0x8];
3944 u8 reserved_0[0x18]; 3944 u8 reserved_at_8[0x18];
3945 3945
3946 u8 syndrome[0x20]; 3946 u8 syndrome[0x20];
3947 3947
3948 u8 reserved_1[0x40]; 3948 u8 reserved_at_40[0x40];
3949 3949
3950 struct mlx5_ifc_cqc_bits cq_context; 3950 struct mlx5_ifc_cqc_bits cq_context;
3951 3951
3952 u8 reserved_2[0x600]; 3952 u8 reserved_at_280[0x600];
3953 3953
3954 u8 pas[0][0x40]; 3954 u8 pas[0][0x40];
3955}; 3955};
3956 3956
3957struct mlx5_ifc_query_cq_in_bits { 3957struct mlx5_ifc_query_cq_in_bits {
3958 u8 opcode[0x10]; 3958 u8 opcode[0x10];
3959 u8 reserved_0[0x10]; 3959 u8 reserved_at_10[0x10];
3960 3960
3961 u8 reserved_1[0x10]; 3961 u8 reserved_at_20[0x10];
3962 u8 op_mod[0x10]; 3962 u8 op_mod[0x10];
3963 3963
3964 u8 reserved_2[0x8]; 3964 u8 reserved_at_40[0x8];
3965 u8 cqn[0x18]; 3965 u8 cqn[0x18];
3966 3966
3967 u8 reserved_3[0x20]; 3967 u8 reserved_at_60[0x20];
3968}; 3968};
3969 3969
3970struct mlx5_ifc_query_cong_status_out_bits { 3970struct mlx5_ifc_query_cong_status_out_bits {
3971 u8 status[0x8]; 3971 u8 status[0x8];
3972 u8 reserved_0[0x18]; 3972 u8 reserved_at_8[0x18];
3973 3973
3974 u8 syndrome[0x20]; 3974 u8 syndrome[0x20];
3975 3975
3976 u8 reserved_1[0x20]; 3976 u8 reserved_at_40[0x20];
3977 3977
3978 u8 enable[0x1]; 3978 u8 enable[0x1];
3979 u8 tag_enable[0x1]; 3979 u8 tag_enable[0x1];
3980 u8 reserved_2[0x1e]; 3980 u8 reserved_at_62[0x1e];
3981}; 3981};
3982 3982
3983struct mlx5_ifc_query_cong_status_in_bits { 3983struct mlx5_ifc_query_cong_status_in_bits {
3984 u8 opcode[0x10]; 3984 u8 opcode[0x10];
3985 u8 reserved_0[0x10]; 3985 u8 reserved_at_10[0x10];
3986 3986
3987 u8 reserved_1[0x10]; 3987 u8 reserved_at_20[0x10];
3988 u8 op_mod[0x10]; 3988 u8 op_mod[0x10];
3989 3989
3990 u8 reserved_2[0x18]; 3990 u8 reserved_at_40[0x18];
3991 u8 priority[0x4]; 3991 u8 priority[0x4];
3992 u8 cong_protocol[0x4]; 3992 u8 cong_protocol[0x4];
3993 3993
3994 u8 reserved_3[0x20]; 3994 u8 reserved_at_60[0x20];
3995}; 3995};
3996 3996
3997struct mlx5_ifc_query_cong_statistics_out_bits { 3997struct mlx5_ifc_query_cong_statistics_out_bits {
3998 u8 status[0x8]; 3998 u8 status[0x8];
3999 u8 reserved_0[0x18]; 3999 u8 reserved_at_8[0x18];
4000 4000
4001 u8 syndrome[0x20]; 4001 u8 syndrome[0x20];
4002 4002
4003 u8 reserved_1[0x40]; 4003 u8 reserved_at_40[0x40];
4004 4004
4005 u8 cur_flows[0x20]; 4005 u8 cur_flows[0x20];
4006 4006
@@ -4014,7 +4014,7 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4014 4014
4015 u8 cnp_handled_low[0x20]; 4015 u8 cnp_handled_low[0x20];
4016 4016
4017 u8 reserved_2[0x100]; 4017 u8 reserved_at_140[0x100];
4018 4018
4019 u8 time_stamp_high[0x20]; 4019 u8 time_stamp_high[0x20];
4020 4020
@@ -4030,453 +4030,453 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4030 4030
4031 u8 cnps_sent_low[0x20]; 4031 u8 cnps_sent_low[0x20];
4032 4032
4033 u8 reserved_3[0x560]; 4033 u8 reserved_at_320[0x560];
4034}; 4034};
4035 4035
4036struct mlx5_ifc_query_cong_statistics_in_bits { 4036struct mlx5_ifc_query_cong_statistics_in_bits {
4037 u8 opcode[0x10]; 4037 u8 opcode[0x10];
4038 u8 reserved_0[0x10]; 4038 u8 reserved_at_10[0x10];
4039 4039
4040 u8 reserved_1[0x10]; 4040 u8 reserved_at_20[0x10];
4041 u8 op_mod[0x10]; 4041 u8 op_mod[0x10];
4042 4042
4043 u8 clear[0x1]; 4043 u8 clear[0x1];
4044 u8 reserved_2[0x1f]; 4044 u8 reserved_at_41[0x1f];
4045 4045
4046 u8 reserved_3[0x20]; 4046 u8 reserved_at_60[0x20];
4047}; 4047};
4048 4048
4049struct mlx5_ifc_query_cong_params_out_bits { 4049struct mlx5_ifc_query_cong_params_out_bits {
4050 u8 status[0x8]; 4050 u8 status[0x8];
4051 u8 reserved_0[0x18]; 4051 u8 reserved_at_8[0x18];
4052 4052
4053 u8 syndrome[0x20]; 4053 u8 syndrome[0x20];
4054 4054
4055 u8 reserved_1[0x40]; 4055 u8 reserved_at_40[0x40];
4056 4056
4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4058}; 4058};
4059 4059
4060struct mlx5_ifc_query_cong_params_in_bits { 4060struct mlx5_ifc_query_cong_params_in_bits {
4061 u8 opcode[0x10]; 4061 u8 opcode[0x10];
4062 u8 reserved_0[0x10]; 4062 u8 reserved_at_10[0x10];
4063 4063
4064 u8 reserved_1[0x10]; 4064 u8 reserved_at_20[0x10];
4065 u8 op_mod[0x10]; 4065 u8 op_mod[0x10];
4066 4066
4067 u8 reserved_2[0x1c]; 4067 u8 reserved_at_40[0x1c];
4068 u8 cong_protocol[0x4]; 4068 u8 cong_protocol[0x4];
4069 4069
4070 u8 reserved_3[0x20]; 4070 u8 reserved_at_60[0x20];
4071}; 4071};
4072 4072
4073struct mlx5_ifc_query_adapter_out_bits { 4073struct mlx5_ifc_query_adapter_out_bits {
4074 u8 status[0x8]; 4074 u8 status[0x8];
4075 u8 reserved_0[0x18]; 4075 u8 reserved_at_8[0x18];
4076 4076
4077 u8 syndrome[0x20]; 4077 u8 syndrome[0x20];
4078 4078
4079 u8 reserved_1[0x40]; 4079 u8 reserved_at_40[0x40];
4080 4080
4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; 4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
4082}; 4082};
4083 4083
4084struct mlx5_ifc_query_adapter_in_bits { 4084struct mlx5_ifc_query_adapter_in_bits {
4085 u8 opcode[0x10]; 4085 u8 opcode[0x10];
4086 u8 reserved_0[0x10]; 4086 u8 reserved_at_10[0x10];
4087 4087
4088 u8 reserved_1[0x10]; 4088 u8 reserved_at_20[0x10];
4089 u8 op_mod[0x10]; 4089 u8 op_mod[0x10];
4090 4090
4091 u8 reserved_2[0x40]; 4091 u8 reserved_at_40[0x40];
4092}; 4092};
4093 4093
4094struct mlx5_ifc_qp_2rst_out_bits { 4094struct mlx5_ifc_qp_2rst_out_bits {
4095 u8 status[0x8]; 4095 u8 status[0x8];
4096 u8 reserved_0[0x18]; 4096 u8 reserved_at_8[0x18];
4097 4097
4098 u8 syndrome[0x20]; 4098 u8 syndrome[0x20];
4099 4099
4100 u8 reserved_1[0x40]; 4100 u8 reserved_at_40[0x40];
4101}; 4101};
4102 4102
4103struct mlx5_ifc_qp_2rst_in_bits { 4103struct mlx5_ifc_qp_2rst_in_bits {
4104 u8 opcode[0x10]; 4104 u8 opcode[0x10];
4105 u8 reserved_0[0x10]; 4105 u8 reserved_at_10[0x10];
4106 4106
4107 u8 reserved_1[0x10]; 4107 u8 reserved_at_20[0x10];
4108 u8 op_mod[0x10]; 4108 u8 op_mod[0x10];
4109 4109
4110 u8 reserved_2[0x8]; 4110 u8 reserved_at_40[0x8];
4111 u8 qpn[0x18]; 4111 u8 qpn[0x18];
4112 4112
4113 u8 reserved_3[0x20]; 4113 u8 reserved_at_60[0x20];
4114}; 4114};
4115 4115
4116struct mlx5_ifc_qp_2err_out_bits { 4116struct mlx5_ifc_qp_2err_out_bits {
4117 u8 status[0x8]; 4117 u8 status[0x8];
4118 u8 reserved_0[0x18]; 4118 u8 reserved_at_8[0x18];
4119 4119
4120 u8 syndrome[0x20]; 4120 u8 syndrome[0x20];
4121 4121
4122 u8 reserved_1[0x40]; 4122 u8 reserved_at_40[0x40];
4123}; 4123};
4124 4124
4125struct mlx5_ifc_qp_2err_in_bits { 4125struct mlx5_ifc_qp_2err_in_bits {
4126 u8 opcode[0x10]; 4126 u8 opcode[0x10];
4127 u8 reserved_0[0x10]; 4127 u8 reserved_at_10[0x10];
4128 4128
4129 u8 reserved_1[0x10]; 4129 u8 reserved_at_20[0x10];
4130 u8 op_mod[0x10]; 4130 u8 op_mod[0x10];
4131 4131
4132 u8 reserved_2[0x8]; 4132 u8 reserved_at_40[0x8];
4133 u8 qpn[0x18]; 4133 u8 qpn[0x18];
4134 4134
4135 u8 reserved_3[0x20]; 4135 u8 reserved_at_60[0x20];
4136}; 4136};
4137 4137
4138struct mlx5_ifc_page_fault_resume_out_bits { 4138struct mlx5_ifc_page_fault_resume_out_bits {
4139 u8 status[0x8]; 4139 u8 status[0x8];
4140 u8 reserved_0[0x18]; 4140 u8 reserved_at_8[0x18];
4141 4141
4142 u8 syndrome[0x20]; 4142 u8 syndrome[0x20];
4143 4143
4144 u8 reserved_1[0x40]; 4144 u8 reserved_at_40[0x40];
4145}; 4145};
4146 4146
4147struct mlx5_ifc_page_fault_resume_in_bits { 4147struct mlx5_ifc_page_fault_resume_in_bits {
4148 u8 opcode[0x10]; 4148 u8 opcode[0x10];
4149 u8 reserved_0[0x10]; 4149 u8 reserved_at_10[0x10];
4150 4150
4151 u8 reserved_1[0x10]; 4151 u8 reserved_at_20[0x10];
4152 u8 op_mod[0x10]; 4152 u8 op_mod[0x10];
4153 4153
4154 u8 error[0x1]; 4154 u8 error[0x1];
4155 u8 reserved_2[0x4]; 4155 u8 reserved_at_41[0x4];
4156 u8 rdma[0x1]; 4156 u8 rdma[0x1];
4157 u8 read_write[0x1]; 4157 u8 read_write[0x1];
4158 u8 req_res[0x1]; 4158 u8 req_res[0x1];
4159 u8 qpn[0x18]; 4159 u8 qpn[0x18];
4160 4160
4161 u8 reserved_3[0x20]; 4161 u8 reserved_at_60[0x20];
4162}; 4162};
4163 4163
4164struct mlx5_ifc_nop_out_bits { 4164struct mlx5_ifc_nop_out_bits {
4165 u8 status[0x8]; 4165 u8 status[0x8];
4166 u8 reserved_0[0x18]; 4166 u8 reserved_at_8[0x18];
4167 4167
4168 u8 syndrome[0x20]; 4168 u8 syndrome[0x20];
4169 4169
4170 u8 reserved_1[0x40]; 4170 u8 reserved_at_40[0x40];
4171}; 4171};
4172 4172
4173struct mlx5_ifc_nop_in_bits { 4173struct mlx5_ifc_nop_in_bits {
4174 u8 opcode[0x10]; 4174 u8 opcode[0x10];
4175 u8 reserved_0[0x10]; 4175 u8 reserved_at_10[0x10];
4176 4176
4177 u8 reserved_1[0x10]; 4177 u8 reserved_at_20[0x10];
4178 u8 op_mod[0x10]; 4178 u8 op_mod[0x10];
4179 4179
4180 u8 reserved_2[0x40]; 4180 u8 reserved_at_40[0x40];
4181}; 4181};
4182 4182
4183struct mlx5_ifc_modify_vport_state_out_bits { 4183struct mlx5_ifc_modify_vport_state_out_bits {
4184 u8 status[0x8]; 4184 u8 status[0x8];
4185 u8 reserved_0[0x18]; 4185 u8 reserved_at_8[0x18];
4186 4186
4187 u8 syndrome[0x20]; 4187 u8 syndrome[0x20];
4188 4188
4189 u8 reserved_1[0x40]; 4189 u8 reserved_at_40[0x40];
4190}; 4190};
4191 4191
4192struct mlx5_ifc_modify_vport_state_in_bits { 4192struct mlx5_ifc_modify_vport_state_in_bits {
4193 u8 opcode[0x10]; 4193 u8 opcode[0x10];
4194 u8 reserved_0[0x10]; 4194 u8 reserved_at_10[0x10];
4195 4195
4196 u8 reserved_1[0x10]; 4196 u8 reserved_at_20[0x10];
4197 u8 op_mod[0x10]; 4197 u8 op_mod[0x10];
4198 4198
4199 u8 other_vport[0x1]; 4199 u8 other_vport[0x1];
4200 u8 reserved_2[0xf]; 4200 u8 reserved_at_41[0xf];
4201 u8 vport_number[0x10]; 4201 u8 vport_number[0x10];
4202 4202
4203 u8 reserved_3[0x18]; 4203 u8 reserved_at_60[0x18];
4204 u8 admin_state[0x4]; 4204 u8 admin_state[0x4];
4205 u8 reserved_4[0x4]; 4205 u8 reserved_at_7c[0x4];
4206}; 4206};
4207 4207
4208struct mlx5_ifc_modify_tis_out_bits { 4208struct mlx5_ifc_modify_tis_out_bits {
4209 u8 status[0x8]; 4209 u8 status[0x8];
4210 u8 reserved_0[0x18]; 4210 u8 reserved_at_8[0x18];
4211 4211
4212 u8 syndrome[0x20]; 4212 u8 syndrome[0x20];
4213 4213
4214 u8 reserved_1[0x40]; 4214 u8 reserved_at_40[0x40];
4215}; 4215};
4216 4216
4217struct mlx5_ifc_modify_tis_bitmask_bits { 4217struct mlx5_ifc_modify_tis_bitmask_bits {
4218 u8 reserved_0[0x20]; 4218 u8 reserved_at_0[0x20];
4219 4219
4220 u8 reserved_1[0x1f]; 4220 u8 reserved_at_20[0x1f];
4221 u8 prio[0x1]; 4221 u8 prio[0x1];
4222}; 4222};
4223 4223
4224struct mlx5_ifc_modify_tis_in_bits { 4224struct mlx5_ifc_modify_tis_in_bits {
4225 u8 opcode[0x10]; 4225 u8 opcode[0x10];
4226 u8 reserved_0[0x10]; 4226 u8 reserved_at_10[0x10];
4227 4227
4228 u8 reserved_1[0x10]; 4228 u8 reserved_at_20[0x10];
4229 u8 op_mod[0x10]; 4229 u8 op_mod[0x10];
4230 4230
4231 u8 reserved_2[0x8]; 4231 u8 reserved_at_40[0x8];
4232 u8 tisn[0x18]; 4232 u8 tisn[0x18];
4233 4233
4234 u8 reserved_3[0x20]; 4234 u8 reserved_at_60[0x20];
4235 4235
4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask; 4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
4237 4237
4238 u8 reserved_4[0x40]; 4238 u8 reserved_at_c0[0x40];
4239 4239
4240 struct mlx5_ifc_tisc_bits ctx; 4240 struct mlx5_ifc_tisc_bits ctx;
4241}; 4241};
4242 4242
4243struct mlx5_ifc_modify_tir_bitmask_bits { 4243struct mlx5_ifc_modify_tir_bitmask_bits {
4244 u8 reserved_0[0x20]; 4244 u8 reserved_at_0[0x20];
4245 4245
4246 u8 reserved_1[0x1b]; 4246 u8 reserved_at_20[0x1b];
4247 u8 self_lb_en[0x1]; 4247 u8 self_lb_en[0x1];
4248 u8 reserved_2[0x3]; 4248 u8 reserved_at_3c[0x3];
4249 u8 lro[0x1]; 4249 u8 lro[0x1];
4250}; 4250};
4251 4251
4252struct mlx5_ifc_modify_tir_out_bits { 4252struct mlx5_ifc_modify_tir_out_bits {
4253 u8 status[0x8]; 4253 u8 status[0x8];
4254 u8 reserved_0[0x18]; 4254 u8 reserved_at_8[0x18];
4255 4255
4256 u8 syndrome[0x20]; 4256 u8 syndrome[0x20];
4257 4257
4258 u8 reserved_1[0x40]; 4258 u8 reserved_at_40[0x40];
4259}; 4259};
4260 4260
4261struct mlx5_ifc_modify_tir_in_bits { 4261struct mlx5_ifc_modify_tir_in_bits {
4262 u8 opcode[0x10]; 4262 u8 opcode[0x10];
4263 u8 reserved_0[0x10]; 4263 u8 reserved_at_10[0x10];
4264 4264
4265 u8 reserved_1[0x10]; 4265 u8 reserved_at_20[0x10];
4266 u8 op_mod[0x10]; 4266 u8 op_mod[0x10];
4267 4267
4268 u8 reserved_2[0x8]; 4268 u8 reserved_at_40[0x8];
4269 u8 tirn[0x18]; 4269 u8 tirn[0x18];
4270 4270
4271 u8 reserved_3[0x20]; 4271 u8 reserved_at_60[0x20];
4272 4272
4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask; 4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
4274 4274
4275 u8 reserved_4[0x40]; 4275 u8 reserved_at_c0[0x40];
4276 4276
4277 struct mlx5_ifc_tirc_bits ctx; 4277 struct mlx5_ifc_tirc_bits ctx;
4278}; 4278};
4279 4279
4280struct mlx5_ifc_modify_sq_out_bits { 4280struct mlx5_ifc_modify_sq_out_bits {
4281 u8 status[0x8]; 4281 u8 status[0x8];
4282 u8 reserved_0[0x18]; 4282 u8 reserved_at_8[0x18];
4283 4283
4284 u8 syndrome[0x20]; 4284 u8 syndrome[0x20];
4285 4285
4286 u8 reserved_1[0x40]; 4286 u8 reserved_at_40[0x40];
4287}; 4287};
4288 4288
4289struct mlx5_ifc_modify_sq_in_bits { 4289struct mlx5_ifc_modify_sq_in_bits {
4290 u8 opcode[0x10]; 4290 u8 opcode[0x10];
4291 u8 reserved_0[0x10]; 4291 u8 reserved_at_10[0x10];
4292 4292
4293 u8 reserved_1[0x10]; 4293 u8 reserved_at_20[0x10];
4294 u8 op_mod[0x10]; 4294 u8 op_mod[0x10];
4295 4295
4296 u8 sq_state[0x4]; 4296 u8 sq_state[0x4];
4297 u8 reserved_2[0x4]; 4297 u8 reserved_at_44[0x4];
4298 u8 sqn[0x18]; 4298 u8 sqn[0x18];
4299 4299
4300 u8 reserved_3[0x20]; 4300 u8 reserved_at_60[0x20];
4301 4301
4302 u8 modify_bitmask[0x40]; 4302 u8 modify_bitmask[0x40];
4303 4303
4304 u8 reserved_4[0x40]; 4304 u8 reserved_at_c0[0x40];
4305 4305
4306 struct mlx5_ifc_sqc_bits ctx; 4306 struct mlx5_ifc_sqc_bits ctx;
4307}; 4307};
4308 4308
4309struct mlx5_ifc_modify_rqt_out_bits { 4309struct mlx5_ifc_modify_rqt_out_bits {
4310 u8 status[0x8]; 4310 u8 status[0x8];
4311 u8 reserved_0[0x18]; 4311 u8 reserved_at_8[0x18];
4312 4312
4313 u8 syndrome[0x20]; 4313 u8 syndrome[0x20];
4314 4314
4315 u8 reserved_1[0x40]; 4315 u8 reserved_at_40[0x40];
4316}; 4316};
4317 4317
4318struct mlx5_ifc_rqt_bitmask_bits { 4318struct mlx5_ifc_rqt_bitmask_bits {
4319 u8 reserved[0x20]; 4319 u8 reserved_at_0[0x20];
4320 4320
4321 u8 reserved1[0x1f]; 4321 u8 reserved_at_20[0x1f];
4322 u8 rqn_list[0x1]; 4322 u8 rqn_list[0x1];
4323}; 4323};
4324 4324
4325struct mlx5_ifc_modify_rqt_in_bits { 4325struct mlx5_ifc_modify_rqt_in_bits {
4326 u8 opcode[0x10]; 4326 u8 opcode[0x10];
4327 u8 reserved_0[0x10]; 4327 u8 reserved_at_10[0x10];
4328 4328
4329 u8 reserved_1[0x10]; 4329 u8 reserved_at_20[0x10];
4330 u8 op_mod[0x10]; 4330 u8 op_mod[0x10];
4331 4331
4332 u8 reserved_2[0x8]; 4332 u8 reserved_at_40[0x8];
4333 u8 rqtn[0x18]; 4333 u8 rqtn[0x18];
4334 4334
4335 u8 reserved_3[0x20]; 4335 u8 reserved_at_60[0x20];
4336 4336
4337 struct mlx5_ifc_rqt_bitmask_bits bitmask; 4337 struct mlx5_ifc_rqt_bitmask_bits bitmask;
4338 4338
4339 u8 reserved_4[0x40]; 4339 u8 reserved_at_c0[0x40];
4340 4340
4341 struct mlx5_ifc_rqtc_bits ctx; 4341 struct mlx5_ifc_rqtc_bits ctx;
4342}; 4342};
4343 4343
4344struct mlx5_ifc_modify_rq_out_bits { 4344struct mlx5_ifc_modify_rq_out_bits {
4345 u8 status[0x8]; 4345 u8 status[0x8];
4346 u8 reserved_0[0x18]; 4346 u8 reserved_at_8[0x18];
4347 4347
4348 u8 syndrome[0x20]; 4348 u8 syndrome[0x20];
4349 4349
4350 u8 reserved_1[0x40]; 4350 u8 reserved_at_40[0x40];
4351}; 4351};
4352 4352
4353struct mlx5_ifc_modify_rq_in_bits { 4353struct mlx5_ifc_modify_rq_in_bits {
4354 u8 opcode[0x10]; 4354 u8 opcode[0x10];
4355 u8 reserved_0[0x10]; 4355 u8 reserved_at_10[0x10];
4356 4356
4357 u8 reserved_1[0x10]; 4357 u8 reserved_at_20[0x10];
4358 u8 op_mod[0x10]; 4358 u8 op_mod[0x10];
4359 4359
4360 u8 rq_state[0x4]; 4360 u8 rq_state[0x4];
4361 u8 reserved_2[0x4]; 4361 u8 reserved_at_44[0x4];
4362 u8 rqn[0x18]; 4362 u8 rqn[0x18];
4363 4363
4364 u8 reserved_3[0x20]; 4364 u8 reserved_at_60[0x20];
4365 4365
4366 u8 modify_bitmask[0x40]; 4366 u8 modify_bitmask[0x40];
4367 4367
4368 u8 reserved_4[0x40]; 4368 u8 reserved_at_c0[0x40];
4369 4369
4370 struct mlx5_ifc_rqc_bits ctx; 4370 struct mlx5_ifc_rqc_bits ctx;
4371}; 4371};
4372 4372
4373struct mlx5_ifc_modify_rmp_out_bits { 4373struct mlx5_ifc_modify_rmp_out_bits {
4374 u8 status[0x8]; 4374 u8 status[0x8];
4375 u8 reserved_0[0x18]; 4375 u8 reserved_at_8[0x18];
4376 4376
4377 u8 syndrome[0x20]; 4377 u8 syndrome[0x20];
4378 4378
4379 u8 reserved_1[0x40]; 4379 u8 reserved_at_40[0x40];
4380}; 4380};
4381 4381
4382struct mlx5_ifc_rmp_bitmask_bits { 4382struct mlx5_ifc_rmp_bitmask_bits {
4383 u8 reserved[0x20]; 4383 u8 reserved_at_0[0x20];
4384 4384
4385 u8 reserved1[0x1f]; 4385 u8 reserved_at_20[0x1f];
4386 u8 lwm[0x1]; 4386 u8 lwm[0x1];
4387}; 4387};
4388 4388
4389struct mlx5_ifc_modify_rmp_in_bits { 4389struct mlx5_ifc_modify_rmp_in_bits {
4390 u8 opcode[0x10]; 4390 u8 opcode[0x10];
4391 u8 reserved_0[0x10]; 4391 u8 reserved_at_10[0x10];
4392 4392
4393 u8 reserved_1[0x10]; 4393 u8 reserved_at_20[0x10];
4394 u8 op_mod[0x10]; 4394 u8 op_mod[0x10];
4395 4395
4396 u8 rmp_state[0x4]; 4396 u8 rmp_state[0x4];
4397 u8 reserved_2[0x4]; 4397 u8 reserved_at_44[0x4];
4398 u8 rmpn[0x18]; 4398 u8 rmpn[0x18];
4399 4399
4400 u8 reserved_3[0x20]; 4400 u8 reserved_at_60[0x20];
4401 4401
4402 struct mlx5_ifc_rmp_bitmask_bits bitmask; 4402 struct mlx5_ifc_rmp_bitmask_bits bitmask;
4403 4403
4404 u8 reserved_4[0x40]; 4404 u8 reserved_at_c0[0x40];
4405 4405
4406 struct mlx5_ifc_rmpc_bits ctx; 4406 struct mlx5_ifc_rmpc_bits ctx;
4407}; 4407};
4408 4408
4409struct mlx5_ifc_modify_nic_vport_context_out_bits { 4409struct mlx5_ifc_modify_nic_vport_context_out_bits {
4410 u8 status[0x8]; 4410 u8 status[0x8];
4411 u8 reserved_0[0x18]; 4411 u8 reserved_at_8[0x18];
4412 4412
4413 u8 syndrome[0x20]; 4413 u8 syndrome[0x20];
4414 4414
4415 u8 reserved_1[0x40]; 4415 u8 reserved_at_40[0x40];
4416}; 4416};
4417 4417
4418struct mlx5_ifc_modify_nic_vport_field_select_bits { 4418struct mlx5_ifc_modify_nic_vport_field_select_bits {
4419 u8 reserved_0[0x19]; 4419 u8 reserved_at_0[0x19];
4420 u8 mtu[0x1]; 4420 u8 mtu[0x1];
4421 u8 change_event[0x1]; 4421 u8 change_event[0x1];
4422 u8 promisc[0x1]; 4422 u8 promisc[0x1];
4423 u8 permanent_address[0x1]; 4423 u8 permanent_address[0x1];
4424 u8 addresses_list[0x1]; 4424 u8 addresses_list[0x1];
4425 u8 roce_en[0x1]; 4425 u8 roce_en[0x1];
4426 u8 reserved_1[0x1]; 4426 u8 reserved_at_1f[0x1];
4427}; 4427};
4428 4428
4429struct mlx5_ifc_modify_nic_vport_context_in_bits { 4429struct mlx5_ifc_modify_nic_vport_context_in_bits {
4430 u8 opcode[0x10]; 4430 u8 opcode[0x10];
4431 u8 reserved_0[0x10]; 4431 u8 reserved_at_10[0x10];
4432 4432
4433 u8 reserved_1[0x10]; 4433 u8 reserved_at_20[0x10];
4434 u8 op_mod[0x10]; 4434 u8 op_mod[0x10];
4435 4435
4436 u8 other_vport[0x1]; 4436 u8 other_vport[0x1];
4437 u8 reserved_2[0xf]; 4437 u8 reserved_at_41[0xf];
4438 u8 vport_number[0x10]; 4438 u8 vport_number[0x10];
4439 4439
4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; 4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
4441 4441
4442 u8 reserved_3[0x780]; 4442 u8 reserved_at_80[0x780];
4443 4443
4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
4445}; 4445};
4446 4446
4447struct mlx5_ifc_modify_hca_vport_context_out_bits { 4447struct mlx5_ifc_modify_hca_vport_context_out_bits {
4448 u8 status[0x8]; 4448 u8 status[0x8];
4449 u8 reserved_0[0x18]; 4449 u8 reserved_at_8[0x18];
4450 4450
4451 u8 syndrome[0x20]; 4451 u8 syndrome[0x20];
4452 4452
4453 u8 reserved_1[0x40]; 4453 u8 reserved_at_40[0x40];
4454}; 4454};
4455 4455
4456struct mlx5_ifc_modify_hca_vport_context_in_bits { 4456struct mlx5_ifc_modify_hca_vport_context_in_bits {
4457 u8 opcode[0x10]; 4457 u8 opcode[0x10];
4458 u8 reserved_0[0x10]; 4458 u8 reserved_at_10[0x10];
4459 4459
4460 u8 reserved_1[0x10]; 4460 u8 reserved_at_20[0x10];
4461 u8 op_mod[0x10]; 4461 u8 op_mod[0x10];
4462 4462
4463 u8 other_vport[0x1]; 4463 u8 other_vport[0x1];
4464 u8 reserved_2[0xb]; 4464 u8 reserved_at_41[0xb];
4465 u8 port_num[0x4]; 4465 u8 port_num[0x4];
4466 u8 vport_number[0x10]; 4466 u8 vport_number[0x10];
4467 4467
4468 u8 reserved_3[0x20]; 4468 u8 reserved_at_60[0x20];
4469 4469
4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
4471}; 4471};
4472 4472
4473struct mlx5_ifc_modify_cq_out_bits { 4473struct mlx5_ifc_modify_cq_out_bits {
4474 u8 status[0x8]; 4474 u8 status[0x8];
4475 u8 reserved_0[0x18]; 4475 u8 reserved_at_8[0x18];
4476 4476
4477 u8 syndrome[0x20]; 4477 u8 syndrome[0x20];
4478 4478
4479 u8 reserved_1[0x40]; 4479 u8 reserved_at_40[0x40];
4480}; 4480};
4481 4481
4482enum { 4482enum {
@@ -4486,83 +4486,83 @@ enum {
4486 4486
4487struct mlx5_ifc_modify_cq_in_bits { 4487struct mlx5_ifc_modify_cq_in_bits {
4488 u8 opcode[0x10]; 4488 u8 opcode[0x10];
4489 u8 reserved_0[0x10]; 4489 u8 reserved_at_10[0x10];
4490 4490
4491 u8 reserved_1[0x10]; 4491 u8 reserved_at_20[0x10];
4492 u8 op_mod[0x10]; 4492 u8 op_mod[0x10];
4493 4493
4494 u8 reserved_2[0x8]; 4494 u8 reserved_at_40[0x8];
4495 u8 cqn[0x18]; 4495 u8 cqn[0x18];
4496 4496
4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; 4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
4498 4498
4499 struct mlx5_ifc_cqc_bits cq_context; 4499 struct mlx5_ifc_cqc_bits cq_context;
4500 4500
4501 u8 reserved_3[0x600]; 4501 u8 reserved_at_280[0x600];
4502 4502
4503 u8 pas[0][0x40]; 4503 u8 pas[0][0x40];
4504}; 4504};
4505 4505
4506struct mlx5_ifc_modify_cong_status_out_bits { 4506struct mlx5_ifc_modify_cong_status_out_bits {
4507 u8 status[0x8]; 4507 u8 status[0x8];
4508 u8 reserved_0[0x18]; 4508 u8 reserved_at_8[0x18];
4509 4509
4510 u8 syndrome[0x20]; 4510 u8 syndrome[0x20];
4511 4511
4512 u8 reserved_1[0x40]; 4512 u8 reserved_at_40[0x40];
4513}; 4513};
4514 4514
4515struct mlx5_ifc_modify_cong_status_in_bits { 4515struct mlx5_ifc_modify_cong_status_in_bits {
4516 u8 opcode[0x10]; 4516 u8 opcode[0x10];
4517 u8 reserved_0[0x10]; 4517 u8 reserved_at_10[0x10];
4518 4518
4519 u8 reserved_1[0x10]; 4519 u8 reserved_at_20[0x10];
4520 u8 op_mod[0x10]; 4520 u8 op_mod[0x10];
4521 4521
4522 u8 reserved_2[0x18]; 4522 u8 reserved_at_40[0x18];
4523 u8 priority[0x4]; 4523 u8 priority[0x4];
4524 u8 cong_protocol[0x4]; 4524 u8 cong_protocol[0x4];
4525 4525
4526 u8 enable[0x1]; 4526 u8 enable[0x1];
4527 u8 tag_enable[0x1]; 4527 u8 tag_enable[0x1];
4528 u8 reserved_3[0x1e]; 4528 u8 reserved_at_62[0x1e];
4529}; 4529};
4530 4530
4531struct mlx5_ifc_modify_cong_params_out_bits { 4531struct mlx5_ifc_modify_cong_params_out_bits {
4532 u8 status[0x8]; 4532 u8 status[0x8];
4533 u8 reserved_0[0x18]; 4533 u8 reserved_at_8[0x18];
4534 4534
4535 u8 syndrome[0x20]; 4535 u8 syndrome[0x20];
4536 4536
4537 u8 reserved_1[0x40]; 4537 u8 reserved_at_40[0x40];
4538}; 4538};
4539 4539
4540struct mlx5_ifc_modify_cong_params_in_bits { 4540struct mlx5_ifc_modify_cong_params_in_bits {
4541 u8 opcode[0x10]; 4541 u8 opcode[0x10];
4542 u8 reserved_0[0x10]; 4542 u8 reserved_at_10[0x10];
4543 4543
4544 u8 reserved_1[0x10]; 4544 u8 reserved_at_20[0x10];
4545 u8 op_mod[0x10]; 4545 u8 op_mod[0x10];
4546 4546
4547 u8 reserved_2[0x1c]; 4547 u8 reserved_at_40[0x1c];
4548 u8 cong_protocol[0x4]; 4548 u8 cong_protocol[0x4];
4549 4549
4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; 4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
4551 4551
4552 u8 reserved_3[0x80]; 4552 u8 reserved_at_80[0x80];
4553 4553
4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4555}; 4555};
4556 4556
4557struct mlx5_ifc_manage_pages_out_bits { 4557struct mlx5_ifc_manage_pages_out_bits {
4558 u8 status[0x8]; 4558 u8 status[0x8];
4559 u8 reserved_0[0x18]; 4559 u8 reserved_at_8[0x18];
4560 4560
4561 u8 syndrome[0x20]; 4561 u8 syndrome[0x20];
4562 4562
4563 u8 output_num_entries[0x20]; 4563 u8 output_num_entries[0x20];
4564 4564
4565 u8 reserved_1[0x20]; 4565 u8 reserved_at_60[0x20];
4566 4566
4567 u8 pas[0][0x40]; 4567 u8 pas[0][0x40];
4568}; 4568};
@@ -4575,12 +4575,12 @@ enum {
4575 4575
4576struct mlx5_ifc_manage_pages_in_bits { 4576struct mlx5_ifc_manage_pages_in_bits {
4577 u8 opcode[0x10]; 4577 u8 opcode[0x10];
4578 u8 reserved_0[0x10]; 4578 u8 reserved_at_10[0x10];
4579 4579
4580 u8 reserved_1[0x10]; 4580 u8 reserved_at_20[0x10];
4581 u8 op_mod[0x10]; 4581 u8 op_mod[0x10];
4582 4582
4583 u8 reserved_2[0x10]; 4583 u8 reserved_at_40[0x10];
4584 u8 function_id[0x10]; 4584 u8 function_id[0x10];
4585 4585
4586 u8 input_num_entries[0x20]; 4586 u8 input_num_entries[0x20];
@@ -4590,117 +4590,117 @@ struct mlx5_ifc_manage_pages_in_bits {
4590 4590
4591struct mlx5_ifc_mad_ifc_out_bits { 4591struct mlx5_ifc_mad_ifc_out_bits {
4592 u8 status[0x8]; 4592 u8 status[0x8];
4593 u8 reserved_0[0x18]; 4593 u8 reserved_at_8[0x18];
4594 4594
4595 u8 syndrome[0x20]; 4595 u8 syndrome[0x20];
4596 4596
4597 u8 reserved_1[0x40]; 4597 u8 reserved_at_40[0x40];
4598 4598
4599 u8 response_mad_packet[256][0x8]; 4599 u8 response_mad_packet[256][0x8];
4600}; 4600};
4601 4601
4602struct mlx5_ifc_mad_ifc_in_bits { 4602struct mlx5_ifc_mad_ifc_in_bits {
4603 u8 opcode[0x10]; 4603 u8 opcode[0x10];
4604 u8 reserved_0[0x10]; 4604 u8 reserved_at_10[0x10];
4605 4605
4606 u8 reserved_1[0x10]; 4606 u8 reserved_at_20[0x10];
4607 u8 op_mod[0x10]; 4607 u8 op_mod[0x10];
4608 4608
4609 u8 remote_lid[0x10]; 4609 u8 remote_lid[0x10];
4610 u8 reserved_2[0x8]; 4610 u8 reserved_at_50[0x8];
4611 u8 port[0x8]; 4611 u8 port[0x8];
4612 4612
4613 u8 reserved_3[0x20]; 4613 u8 reserved_at_60[0x20];
4614 4614
4615 u8 mad[256][0x8]; 4615 u8 mad[256][0x8];
4616}; 4616};
4617 4617
4618struct mlx5_ifc_init_hca_out_bits { 4618struct mlx5_ifc_init_hca_out_bits {
4619 u8 status[0x8]; 4619 u8 status[0x8];
4620 u8 reserved_0[0x18]; 4620 u8 reserved_at_8[0x18];
4621 4621
4622 u8 syndrome[0x20]; 4622 u8 syndrome[0x20];
4623 4623
4624 u8 reserved_1[0x40]; 4624 u8 reserved_at_40[0x40];
4625}; 4625};
4626 4626
4627struct mlx5_ifc_init_hca_in_bits { 4627struct mlx5_ifc_init_hca_in_bits {
4628 u8 opcode[0x10]; 4628 u8 opcode[0x10];
4629 u8 reserved_0[0x10]; 4629 u8 reserved_at_10[0x10];
4630 4630
4631 u8 reserved_1[0x10]; 4631 u8 reserved_at_20[0x10];
4632 u8 op_mod[0x10]; 4632 u8 op_mod[0x10];
4633 4633
4634 u8 reserved_2[0x40]; 4634 u8 reserved_at_40[0x40];
4635}; 4635};
4636 4636
4637struct mlx5_ifc_init2rtr_qp_out_bits { 4637struct mlx5_ifc_init2rtr_qp_out_bits {
4638 u8 status[0x8]; 4638 u8 status[0x8];
4639 u8 reserved_0[0x18]; 4639 u8 reserved_at_8[0x18];
4640 4640
4641 u8 syndrome[0x20]; 4641 u8 syndrome[0x20];
4642 4642
4643 u8 reserved_1[0x40]; 4643 u8 reserved_at_40[0x40];
4644}; 4644};
4645 4645
4646struct mlx5_ifc_init2rtr_qp_in_bits { 4646struct mlx5_ifc_init2rtr_qp_in_bits {
4647 u8 opcode[0x10]; 4647 u8 opcode[0x10];
4648 u8 reserved_0[0x10]; 4648 u8 reserved_at_10[0x10];
4649 4649
4650 u8 reserved_1[0x10]; 4650 u8 reserved_at_20[0x10];
4651 u8 op_mod[0x10]; 4651 u8 op_mod[0x10];
4652 4652
4653 u8 reserved_2[0x8]; 4653 u8 reserved_at_40[0x8];
4654 u8 qpn[0x18]; 4654 u8 qpn[0x18];
4655 4655
4656 u8 reserved_3[0x20]; 4656 u8 reserved_at_60[0x20];
4657 4657
4658 u8 opt_param_mask[0x20]; 4658 u8 opt_param_mask[0x20];
4659 4659
4660 u8 reserved_4[0x20]; 4660 u8 reserved_at_a0[0x20];
4661 4661
4662 struct mlx5_ifc_qpc_bits qpc; 4662 struct mlx5_ifc_qpc_bits qpc;
4663 4663
4664 u8 reserved_5[0x80]; 4664 u8 reserved_at_800[0x80];
4665}; 4665};
4666 4666
4667struct mlx5_ifc_init2init_qp_out_bits { 4667struct mlx5_ifc_init2init_qp_out_bits {
4668 u8 status[0x8]; 4668 u8 status[0x8];
4669 u8 reserved_0[0x18]; 4669 u8 reserved_at_8[0x18];
4670 4670
4671 u8 syndrome[0x20]; 4671 u8 syndrome[0x20];
4672 4672
4673 u8 reserved_1[0x40]; 4673 u8 reserved_at_40[0x40];
4674}; 4674};
4675 4675
4676struct mlx5_ifc_init2init_qp_in_bits { 4676struct mlx5_ifc_init2init_qp_in_bits {
4677 u8 opcode[0x10]; 4677 u8 opcode[0x10];
4678 u8 reserved_0[0x10]; 4678 u8 reserved_at_10[0x10];
4679 4679
4680 u8 reserved_1[0x10]; 4680 u8 reserved_at_20[0x10];
4681 u8 op_mod[0x10]; 4681 u8 op_mod[0x10];
4682 4682
4683 u8 reserved_2[0x8]; 4683 u8 reserved_at_40[0x8];
4684 u8 qpn[0x18]; 4684 u8 qpn[0x18];
4685 4685
4686 u8 reserved_3[0x20]; 4686 u8 reserved_at_60[0x20];
4687 4687
4688 u8 opt_param_mask[0x20]; 4688 u8 opt_param_mask[0x20];
4689 4689
4690 u8 reserved_4[0x20]; 4690 u8 reserved_at_a0[0x20];
4691 4691
4692 struct mlx5_ifc_qpc_bits qpc; 4692 struct mlx5_ifc_qpc_bits qpc;
4693 4693
4694 u8 reserved_5[0x80]; 4694 u8 reserved_at_800[0x80];
4695}; 4695};
4696 4696
4697struct mlx5_ifc_get_dropped_packet_log_out_bits { 4697struct mlx5_ifc_get_dropped_packet_log_out_bits {
4698 u8 status[0x8]; 4698 u8 status[0x8];
4699 u8 reserved_0[0x18]; 4699 u8 reserved_at_8[0x18];
4700 4700
4701 u8 syndrome[0x20]; 4701 u8 syndrome[0x20];
4702 4702
4703 u8 reserved_1[0x40]; 4703 u8 reserved_at_40[0x40];
4704 4704
4705 u8 packet_headers_log[128][0x8]; 4705 u8 packet_headers_log[128][0x8];
4706 4706
@@ -4709,1029 +4709,1029 @@ struct mlx5_ifc_get_dropped_packet_log_out_bits {
4709 4709
4710struct mlx5_ifc_get_dropped_packet_log_in_bits { 4710struct mlx5_ifc_get_dropped_packet_log_in_bits {
4711 u8 opcode[0x10]; 4711 u8 opcode[0x10];
4712 u8 reserved_0[0x10]; 4712 u8 reserved_at_10[0x10];
4713 4713
4714 u8 reserved_1[0x10]; 4714 u8 reserved_at_20[0x10];
4715 u8 op_mod[0x10]; 4715 u8 op_mod[0x10];
4716 4716
4717 u8 reserved_2[0x40]; 4717 u8 reserved_at_40[0x40];
4718}; 4718};
4719 4719
4720struct mlx5_ifc_gen_eqe_in_bits { 4720struct mlx5_ifc_gen_eqe_in_bits {
4721 u8 opcode[0x10]; 4721 u8 opcode[0x10];
4722 u8 reserved_0[0x10]; 4722 u8 reserved_at_10[0x10];
4723 4723
4724 u8 reserved_1[0x10]; 4724 u8 reserved_at_20[0x10];
4725 u8 op_mod[0x10]; 4725 u8 op_mod[0x10];
4726 4726
4727 u8 reserved_2[0x18]; 4727 u8 reserved_at_40[0x18];
4728 u8 eq_number[0x8]; 4728 u8 eq_number[0x8];
4729 4729
4730 u8 reserved_3[0x20]; 4730 u8 reserved_at_60[0x20];
4731 4731
4732 u8 eqe[64][0x8]; 4732 u8 eqe[64][0x8];
4733}; 4733};
4734 4734
4735struct mlx5_ifc_gen_eq_out_bits { 4735struct mlx5_ifc_gen_eq_out_bits {
4736 u8 status[0x8]; 4736 u8 status[0x8];
4737 u8 reserved_0[0x18]; 4737 u8 reserved_at_8[0x18];
4738 4738
4739 u8 syndrome[0x20]; 4739 u8 syndrome[0x20];
4740 4740
4741 u8 reserved_1[0x40]; 4741 u8 reserved_at_40[0x40];
4742}; 4742};
4743 4743
4744struct mlx5_ifc_enable_hca_out_bits { 4744struct mlx5_ifc_enable_hca_out_bits {
4745 u8 status[0x8]; 4745 u8 status[0x8];
4746 u8 reserved_0[0x18]; 4746 u8 reserved_at_8[0x18];
4747 4747
4748 u8 syndrome[0x20]; 4748 u8 syndrome[0x20];
4749 4749
4750 u8 reserved_1[0x20]; 4750 u8 reserved_at_40[0x20];
4751}; 4751};
4752 4752
4753struct mlx5_ifc_enable_hca_in_bits { 4753struct mlx5_ifc_enable_hca_in_bits {
4754 u8 opcode[0x10]; 4754 u8 opcode[0x10];
4755 u8 reserved_0[0x10]; 4755 u8 reserved_at_10[0x10];
4756 4756
4757 u8 reserved_1[0x10]; 4757 u8 reserved_at_20[0x10];
4758 u8 op_mod[0x10]; 4758 u8 op_mod[0x10];
4759 4759
4760 u8 reserved_2[0x10]; 4760 u8 reserved_at_40[0x10];
4761 u8 function_id[0x10]; 4761 u8 function_id[0x10];
4762 4762
4763 u8 reserved_3[0x20]; 4763 u8 reserved_at_60[0x20];
4764}; 4764};
4765 4765
4766struct mlx5_ifc_drain_dct_out_bits { 4766struct mlx5_ifc_drain_dct_out_bits {
4767 u8 status[0x8]; 4767 u8 status[0x8];
4768 u8 reserved_0[0x18]; 4768 u8 reserved_at_8[0x18];
4769 4769
4770 u8 syndrome[0x20]; 4770 u8 syndrome[0x20];
4771 4771
4772 u8 reserved_1[0x40]; 4772 u8 reserved_at_40[0x40];
4773}; 4773};
4774 4774
4775struct mlx5_ifc_drain_dct_in_bits { 4775struct mlx5_ifc_drain_dct_in_bits {
4776 u8 opcode[0x10]; 4776 u8 opcode[0x10];
4777 u8 reserved_0[0x10]; 4777 u8 reserved_at_10[0x10];
4778 4778
4779 u8 reserved_1[0x10]; 4779 u8 reserved_at_20[0x10];
4780 u8 op_mod[0x10]; 4780 u8 op_mod[0x10];
4781 4781
4782 u8 reserved_2[0x8]; 4782 u8 reserved_at_40[0x8];
4783 u8 dctn[0x18]; 4783 u8 dctn[0x18];
4784 4784
4785 u8 reserved_3[0x20]; 4785 u8 reserved_at_60[0x20];
4786}; 4786};
4787 4787
4788struct mlx5_ifc_disable_hca_out_bits { 4788struct mlx5_ifc_disable_hca_out_bits {
4789 u8 status[0x8]; 4789 u8 status[0x8];
4790 u8 reserved_0[0x18]; 4790 u8 reserved_at_8[0x18];
4791 4791
4792 u8 syndrome[0x20]; 4792 u8 syndrome[0x20];
4793 4793
4794 u8 reserved_1[0x20]; 4794 u8 reserved_at_40[0x20];
4795}; 4795};
4796 4796
4797struct mlx5_ifc_disable_hca_in_bits { 4797struct mlx5_ifc_disable_hca_in_bits {
4798 u8 opcode[0x10]; 4798 u8 opcode[0x10];
4799 u8 reserved_0[0x10]; 4799 u8 reserved_at_10[0x10];
4800 4800
4801 u8 reserved_1[0x10]; 4801 u8 reserved_at_20[0x10];
4802 u8 op_mod[0x10]; 4802 u8 op_mod[0x10];
4803 4803
4804 u8 reserved_2[0x10]; 4804 u8 reserved_at_40[0x10];
4805 u8 function_id[0x10]; 4805 u8 function_id[0x10];
4806 4806
4807 u8 reserved_3[0x20]; 4807 u8 reserved_at_60[0x20];
4808}; 4808};
4809 4809
4810struct mlx5_ifc_detach_from_mcg_out_bits { 4810struct mlx5_ifc_detach_from_mcg_out_bits {
4811 u8 status[0x8]; 4811 u8 status[0x8];
4812 u8 reserved_0[0x18]; 4812 u8 reserved_at_8[0x18];
4813 4813
4814 u8 syndrome[0x20]; 4814 u8 syndrome[0x20];
4815 4815
4816 u8 reserved_1[0x40]; 4816 u8 reserved_at_40[0x40];
4817}; 4817};
4818 4818
4819struct mlx5_ifc_detach_from_mcg_in_bits { 4819struct mlx5_ifc_detach_from_mcg_in_bits {
4820 u8 opcode[0x10]; 4820 u8 opcode[0x10];
4821 u8 reserved_0[0x10]; 4821 u8 reserved_at_10[0x10];
4822 4822
4823 u8 reserved_1[0x10]; 4823 u8 reserved_at_20[0x10];
4824 u8 op_mod[0x10]; 4824 u8 op_mod[0x10];
4825 4825
4826 u8 reserved_2[0x8]; 4826 u8 reserved_at_40[0x8];
4827 u8 qpn[0x18]; 4827 u8 qpn[0x18];
4828 4828
4829 u8 reserved_3[0x20]; 4829 u8 reserved_at_60[0x20];
4830 4830
4831 u8 multicast_gid[16][0x8]; 4831 u8 multicast_gid[16][0x8];
4832}; 4832};
4833 4833
4834struct mlx5_ifc_destroy_xrc_srq_out_bits { 4834struct mlx5_ifc_destroy_xrc_srq_out_bits {
4835 u8 status[0x8]; 4835 u8 status[0x8];
4836 u8 reserved_0[0x18]; 4836 u8 reserved_at_8[0x18];
4837 4837
4838 u8 syndrome[0x20]; 4838 u8 syndrome[0x20];
4839 4839
4840 u8 reserved_1[0x40]; 4840 u8 reserved_at_40[0x40];
4841}; 4841};
4842 4842
4843struct mlx5_ifc_destroy_xrc_srq_in_bits { 4843struct mlx5_ifc_destroy_xrc_srq_in_bits {
4844 u8 opcode[0x10]; 4844 u8 opcode[0x10];
4845 u8 reserved_0[0x10]; 4845 u8 reserved_at_10[0x10];
4846 4846
4847 u8 reserved_1[0x10]; 4847 u8 reserved_at_20[0x10];
4848 u8 op_mod[0x10]; 4848 u8 op_mod[0x10];
4849 4849
4850 u8 reserved_2[0x8]; 4850 u8 reserved_at_40[0x8];
4851 u8 xrc_srqn[0x18]; 4851 u8 xrc_srqn[0x18];
4852 4852
4853 u8 reserved_3[0x20]; 4853 u8 reserved_at_60[0x20];
4854}; 4854};
4855 4855
4856struct mlx5_ifc_destroy_tis_out_bits { 4856struct mlx5_ifc_destroy_tis_out_bits {
4857 u8 status[0x8]; 4857 u8 status[0x8];
4858 u8 reserved_0[0x18]; 4858 u8 reserved_at_8[0x18];
4859 4859
4860 u8 syndrome[0x20]; 4860 u8 syndrome[0x20];
4861 4861
4862 u8 reserved_1[0x40]; 4862 u8 reserved_at_40[0x40];
4863}; 4863};
4864 4864
4865struct mlx5_ifc_destroy_tis_in_bits { 4865struct mlx5_ifc_destroy_tis_in_bits {
4866 u8 opcode[0x10]; 4866 u8 opcode[0x10];
4867 u8 reserved_0[0x10]; 4867 u8 reserved_at_10[0x10];
4868 4868
4869 u8 reserved_1[0x10]; 4869 u8 reserved_at_20[0x10];
4870 u8 op_mod[0x10]; 4870 u8 op_mod[0x10];
4871 4871
4872 u8 reserved_2[0x8]; 4872 u8 reserved_at_40[0x8];
4873 u8 tisn[0x18]; 4873 u8 tisn[0x18];
4874 4874
4875 u8 reserved_3[0x20]; 4875 u8 reserved_at_60[0x20];
4876}; 4876};
4877 4877
4878struct mlx5_ifc_destroy_tir_out_bits { 4878struct mlx5_ifc_destroy_tir_out_bits {
4879 u8 status[0x8]; 4879 u8 status[0x8];
4880 u8 reserved_0[0x18]; 4880 u8 reserved_at_8[0x18];
4881 4881
4882 u8 syndrome[0x20]; 4882 u8 syndrome[0x20];
4883 4883
4884 u8 reserved_1[0x40]; 4884 u8 reserved_at_40[0x40];
4885}; 4885};
4886 4886
4887struct mlx5_ifc_destroy_tir_in_bits { 4887struct mlx5_ifc_destroy_tir_in_bits {
4888 u8 opcode[0x10]; 4888 u8 opcode[0x10];
4889 u8 reserved_0[0x10]; 4889 u8 reserved_at_10[0x10];
4890 4890
4891 u8 reserved_1[0x10]; 4891 u8 reserved_at_20[0x10];
4892 u8 op_mod[0x10]; 4892 u8 op_mod[0x10];
4893 4893
4894 u8 reserved_2[0x8]; 4894 u8 reserved_at_40[0x8];
4895 u8 tirn[0x18]; 4895 u8 tirn[0x18];
4896 4896
4897 u8 reserved_3[0x20]; 4897 u8 reserved_at_60[0x20];
4898}; 4898};
4899 4899
4900struct mlx5_ifc_destroy_srq_out_bits { 4900struct mlx5_ifc_destroy_srq_out_bits {
4901 u8 status[0x8]; 4901 u8 status[0x8];
4902 u8 reserved_0[0x18]; 4902 u8 reserved_at_8[0x18];
4903 4903
4904 u8 syndrome[0x20]; 4904 u8 syndrome[0x20];
4905 4905
4906 u8 reserved_1[0x40]; 4906 u8 reserved_at_40[0x40];
4907}; 4907};
4908 4908
4909struct mlx5_ifc_destroy_srq_in_bits { 4909struct mlx5_ifc_destroy_srq_in_bits {
4910 u8 opcode[0x10]; 4910 u8 opcode[0x10];
4911 u8 reserved_0[0x10]; 4911 u8 reserved_at_10[0x10];
4912 4912
4913 u8 reserved_1[0x10]; 4913 u8 reserved_at_20[0x10];
4914 u8 op_mod[0x10]; 4914 u8 op_mod[0x10];
4915 4915
4916 u8 reserved_2[0x8]; 4916 u8 reserved_at_40[0x8];
4917 u8 srqn[0x18]; 4917 u8 srqn[0x18];
4918 4918
4919 u8 reserved_3[0x20]; 4919 u8 reserved_at_60[0x20];
4920}; 4920};
4921 4921
4922struct mlx5_ifc_destroy_sq_out_bits { 4922struct mlx5_ifc_destroy_sq_out_bits {
4923 u8 status[0x8]; 4923 u8 status[0x8];
4924 u8 reserved_0[0x18]; 4924 u8 reserved_at_8[0x18];
4925 4925
4926 u8 syndrome[0x20]; 4926 u8 syndrome[0x20];
4927 4927
4928 u8 reserved_1[0x40]; 4928 u8 reserved_at_40[0x40];
4929}; 4929};
4930 4930
4931struct mlx5_ifc_destroy_sq_in_bits { 4931struct mlx5_ifc_destroy_sq_in_bits {
4932 u8 opcode[0x10]; 4932 u8 opcode[0x10];
4933 u8 reserved_0[0x10]; 4933 u8 reserved_at_10[0x10];
4934 4934
4935 u8 reserved_1[0x10]; 4935 u8 reserved_at_20[0x10];
4936 u8 op_mod[0x10]; 4936 u8 op_mod[0x10];
4937 4937
4938 u8 reserved_2[0x8]; 4938 u8 reserved_at_40[0x8];
4939 u8 sqn[0x18]; 4939 u8 sqn[0x18];
4940 4940
4941 u8 reserved_3[0x20]; 4941 u8 reserved_at_60[0x20];
4942}; 4942};
4943 4943
4944struct mlx5_ifc_destroy_rqt_out_bits { 4944struct mlx5_ifc_destroy_rqt_out_bits {
4945 u8 status[0x8]; 4945 u8 status[0x8];
4946 u8 reserved_0[0x18]; 4946 u8 reserved_at_8[0x18];
4947 4947
4948 u8 syndrome[0x20]; 4948 u8 syndrome[0x20];
4949 4949
4950 u8 reserved_1[0x40]; 4950 u8 reserved_at_40[0x40];
4951}; 4951};
4952 4952
4953struct mlx5_ifc_destroy_rqt_in_bits { 4953struct mlx5_ifc_destroy_rqt_in_bits {
4954 u8 opcode[0x10]; 4954 u8 opcode[0x10];
4955 u8 reserved_0[0x10]; 4955 u8 reserved_at_10[0x10];
4956 4956
4957 u8 reserved_1[0x10]; 4957 u8 reserved_at_20[0x10];
4958 u8 op_mod[0x10]; 4958 u8 op_mod[0x10];
4959 4959
4960 u8 reserved_2[0x8]; 4960 u8 reserved_at_40[0x8];
4961 u8 rqtn[0x18]; 4961 u8 rqtn[0x18];
4962 4962
4963 u8 reserved_3[0x20]; 4963 u8 reserved_at_60[0x20];
4964}; 4964};
4965 4965
4966struct mlx5_ifc_destroy_rq_out_bits { 4966struct mlx5_ifc_destroy_rq_out_bits {
4967 u8 status[0x8]; 4967 u8 status[0x8];
4968 u8 reserved_0[0x18]; 4968 u8 reserved_at_8[0x18];
4969 4969
4970 u8 syndrome[0x20]; 4970 u8 syndrome[0x20];
4971 4971
4972 u8 reserved_1[0x40]; 4972 u8 reserved_at_40[0x40];
4973}; 4973};
4974 4974
4975struct mlx5_ifc_destroy_rq_in_bits { 4975struct mlx5_ifc_destroy_rq_in_bits {
4976 u8 opcode[0x10]; 4976 u8 opcode[0x10];
4977 u8 reserved_0[0x10]; 4977 u8 reserved_at_10[0x10];
4978 4978
4979 u8 reserved_1[0x10]; 4979 u8 reserved_at_20[0x10];
4980 u8 op_mod[0x10]; 4980 u8 op_mod[0x10];
4981 4981
4982 u8 reserved_2[0x8]; 4982 u8 reserved_at_40[0x8];
4983 u8 rqn[0x18]; 4983 u8 rqn[0x18];
4984 4984
4985 u8 reserved_3[0x20]; 4985 u8 reserved_at_60[0x20];
4986}; 4986};
4987 4987
4988struct mlx5_ifc_destroy_rmp_out_bits { 4988struct mlx5_ifc_destroy_rmp_out_bits {
4989 u8 status[0x8]; 4989 u8 status[0x8];
4990 u8 reserved_0[0x18]; 4990 u8 reserved_at_8[0x18];
4991 4991
4992 u8 syndrome[0x20]; 4992 u8 syndrome[0x20];
4993 4993
4994 u8 reserved_1[0x40]; 4994 u8 reserved_at_40[0x40];
4995}; 4995};
4996 4996
4997struct mlx5_ifc_destroy_rmp_in_bits { 4997struct mlx5_ifc_destroy_rmp_in_bits {
4998 u8 opcode[0x10]; 4998 u8 opcode[0x10];
4999 u8 reserved_0[0x10]; 4999 u8 reserved_at_10[0x10];
5000 5000
5001 u8 reserved_1[0x10]; 5001 u8 reserved_at_20[0x10];
5002 u8 op_mod[0x10]; 5002 u8 op_mod[0x10];
5003 5003
5004 u8 reserved_2[0x8]; 5004 u8 reserved_at_40[0x8];
5005 u8 rmpn[0x18]; 5005 u8 rmpn[0x18];
5006 5006
5007 u8 reserved_3[0x20]; 5007 u8 reserved_at_60[0x20];
5008}; 5008};
5009 5009
5010struct mlx5_ifc_destroy_qp_out_bits { 5010struct mlx5_ifc_destroy_qp_out_bits {
5011 u8 status[0x8]; 5011 u8 status[0x8];
5012 u8 reserved_0[0x18]; 5012 u8 reserved_at_8[0x18];
5013 5013
5014 u8 syndrome[0x20]; 5014 u8 syndrome[0x20];
5015 5015
5016 u8 reserved_1[0x40]; 5016 u8 reserved_at_40[0x40];
5017}; 5017};
5018 5018
5019struct mlx5_ifc_destroy_qp_in_bits { 5019struct mlx5_ifc_destroy_qp_in_bits {
5020 u8 opcode[0x10]; 5020 u8 opcode[0x10];
5021 u8 reserved_0[0x10]; 5021 u8 reserved_at_10[0x10];
5022 5022
5023 u8 reserved_1[0x10]; 5023 u8 reserved_at_20[0x10];
5024 u8 op_mod[0x10]; 5024 u8 op_mod[0x10];
5025 5025
5026 u8 reserved_2[0x8]; 5026 u8 reserved_at_40[0x8];
5027 u8 qpn[0x18]; 5027 u8 qpn[0x18];
5028 5028
5029 u8 reserved_3[0x20]; 5029 u8 reserved_at_60[0x20];
5030}; 5030};
5031 5031
5032struct mlx5_ifc_destroy_psv_out_bits { 5032struct mlx5_ifc_destroy_psv_out_bits {
5033 u8 status[0x8]; 5033 u8 status[0x8];
5034 u8 reserved_0[0x18]; 5034 u8 reserved_at_8[0x18];
5035 5035
5036 u8 syndrome[0x20]; 5036 u8 syndrome[0x20];
5037 5037
5038 u8 reserved_1[0x40]; 5038 u8 reserved_at_40[0x40];
5039}; 5039};
5040 5040
5041struct mlx5_ifc_destroy_psv_in_bits { 5041struct mlx5_ifc_destroy_psv_in_bits {
5042 u8 opcode[0x10]; 5042 u8 opcode[0x10];
5043 u8 reserved_0[0x10]; 5043 u8 reserved_at_10[0x10];
5044 5044
5045 u8 reserved_1[0x10]; 5045 u8 reserved_at_20[0x10];
5046 u8 op_mod[0x10]; 5046 u8 op_mod[0x10];
5047 5047
5048 u8 reserved_2[0x8]; 5048 u8 reserved_at_40[0x8];
5049 u8 psvn[0x18]; 5049 u8 psvn[0x18];
5050 5050
5051 u8 reserved_3[0x20]; 5051 u8 reserved_at_60[0x20];
5052}; 5052};
5053 5053
5054struct mlx5_ifc_destroy_mkey_out_bits { 5054struct mlx5_ifc_destroy_mkey_out_bits {
5055 u8 status[0x8]; 5055 u8 status[0x8];
5056 u8 reserved_0[0x18]; 5056 u8 reserved_at_8[0x18];
5057 5057
5058 u8 syndrome[0x20]; 5058 u8 syndrome[0x20];
5059 5059
5060 u8 reserved_1[0x40]; 5060 u8 reserved_at_40[0x40];
5061}; 5061};
5062 5062
5063struct mlx5_ifc_destroy_mkey_in_bits { 5063struct mlx5_ifc_destroy_mkey_in_bits {
5064 u8 opcode[0x10]; 5064 u8 opcode[0x10];
5065 u8 reserved_0[0x10]; 5065 u8 reserved_at_10[0x10];
5066 5066
5067 u8 reserved_1[0x10]; 5067 u8 reserved_at_20[0x10];
5068 u8 op_mod[0x10]; 5068 u8 op_mod[0x10];
5069 5069
5070 u8 reserved_2[0x8]; 5070 u8 reserved_at_40[0x8];
5071 u8 mkey_index[0x18]; 5071 u8 mkey_index[0x18];
5072 5072
5073 u8 reserved_3[0x20]; 5073 u8 reserved_at_60[0x20];
5074}; 5074};
5075 5075
5076struct mlx5_ifc_destroy_flow_table_out_bits { 5076struct mlx5_ifc_destroy_flow_table_out_bits {
5077 u8 status[0x8]; 5077 u8 status[0x8];
5078 u8 reserved_0[0x18]; 5078 u8 reserved_at_8[0x18];
5079 5079
5080 u8 syndrome[0x20]; 5080 u8 syndrome[0x20];
5081 5081
5082 u8 reserved_1[0x40]; 5082 u8 reserved_at_40[0x40];
5083}; 5083};
5084 5084
5085struct mlx5_ifc_destroy_flow_table_in_bits { 5085struct mlx5_ifc_destroy_flow_table_in_bits {
5086 u8 opcode[0x10]; 5086 u8 opcode[0x10];
5087 u8 reserved_0[0x10]; 5087 u8 reserved_at_10[0x10];
5088 5088
5089 u8 reserved_1[0x10]; 5089 u8 reserved_at_20[0x10];
5090 u8 op_mod[0x10]; 5090 u8 op_mod[0x10];
5091 5091
5092 u8 reserved_2[0x40]; 5092 u8 reserved_at_40[0x40];
5093 5093
5094 u8 table_type[0x8]; 5094 u8 table_type[0x8];
5095 u8 reserved_3[0x18]; 5095 u8 reserved_at_88[0x18];
5096 5096
5097 u8 reserved_4[0x8]; 5097 u8 reserved_at_a0[0x8];
5098 u8 table_id[0x18]; 5098 u8 table_id[0x18];
5099 5099
5100 u8 reserved_5[0x140]; 5100 u8 reserved_at_c0[0x140];
5101}; 5101};
5102 5102
5103struct mlx5_ifc_destroy_flow_group_out_bits { 5103struct mlx5_ifc_destroy_flow_group_out_bits {
5104 u8 status[0x8]; 5104 u8 status[0x8];
5105 u8 reserved_0[0x18]; 5105 u8 reserved_at_8[0x18];
5106 5106
5107 u8 syndrome[0x20]; 5107 u8 syndrome[0x20];
5108 5108
5109 u8 reserved_1[0x40]; 5109 u8 reserved_at_40[0x40];
5110}; 5110};
5111 5111
5112struct mlx5_ifc_destroy_flow_group_in_bits { 5112struct mlx5_ifc_destroy_flow_group_in_bits {
5113 u8 opcode[0x10]; 5113 u8 opcode[0x10];
5114 u8 reserved_0[0x10]; 5114 u8 reserved_at_10[0x10];
5115 5115
5116 u8 reserved_1[0x10]; 5116 u8 reserved_at_20[0x10];
5117 u8 op_mod[0x10]; 5117 u8 op_mod[0x10];
5118 5118
5119 u8 reserved_2[0x40]; 5119 u8 reserved_at_40[0x40];
5120 5120
5121 u8 table_type[0x8]; 5121 u8 table_type[0x8];
5122 u8 reserved_3[0x18]; 5122 u8 reserved_at_88[0x18];
5123 5123
5124 u8 reserved_4[0x8]; 5124 u8 reserved_at_a0[0x8];
5125 u8 table_id[0x18]; 5125 u8 table_id[0x18];
5126 5126
5127 u8 group_id[0x20]; 5127 u8 group_id[0x20];
5128 5128
5129 u8 reserved_5[0x120]; 5129 u8 reserved_at_e0[0x120];
5130}; 5130};
5131 5131
5132struct mlx5_ifc_destroy_eq_out_bits { 5132struct mlx5_ifc_destroy_eq_out_bits {
5133 u8 status[0x8]; 5133 u8 status[0x8];
5134 u8 reserved_0[0x18]; 5134 u8 reserved_at_8[0x18];
5135 5135
5136 u8 syndrome[0x20]; 5136 u8 syndrome[0x20];
5137 5137
5138 u8 reserved_1[0x40]; 5138 u8 reserved_at_40[0x40];
5139}; 5139};
5140 5140
5141struct mlx5_ifc_destroy_eq_in_bits { 5141struct mlx5_ifc_destroy_eq_in_bits {
5142 u8 opcode[0x10]; 5142 u8 opcode[0x10];
5143 u8 reserved_0[0x10]; 5143 u8 reserved_at_10[0x10];
5144 5144
5145 u8 reserved_1[0x10]; 5145 u8 reserved_at_20[0x10];
5146 u8 op_mod[0x10]; 5146 u8 op_mod[0x10];
5147 5147
5148 u8 reserved_2[0x18]; 5148 u8 reserved_at_40[0x18];
5149 u8 eq_number[0x8]; 5149 u8 eq_number[0x8];
5150 5150
5151 u8 reserved_3[0x20]; 5151 u8 reserved_at_60[0x20];
5152}; 5152};
5153 5153
5154struct mlx5_ifc_destroy_dct_out_bits { 5154struct mlx5_ifc_destroy_dct_out_bits {
5155 u8 status[0x8]; 5155 u8 status[0x8];
5156 u8 reserved_0[0x18]; 5156 u8 reserved_at_8[0x18];
5157 5157
5158 u8 syndrome[0x20]; 5158 u8 syndrome[0x20];
5159 5159
5160 u8 reserved_1[0x40]; 5160 u8 reserved_at_40[0x40];
5161}; 5161};
5162 5162
5163struct mlx5_ifc_destroy_dct_in_bits { 5163struct mlx5_ifc_destroy_dct_in_bits {
5164 u8 opcode[0x10]; 5164 u8 opcode[0x10];
5165 u8 reserved_0[0x10]; 5165 u8 reserved_at_10[0x10];
5166 5166
5167 u8 reserved_1[0x10]; 5167 u8 reserved_at_20[0x10];
5168 u8 op_mod[0x10]; 5168 u8 op_mod[0x10];
5169 5169
5170 u8 reserved_2[0x8]; 5170 u8 reserved_at_40[0x8];
5171 u8 dctn[0x18]; 5171 u8 dctn[0x18];
5172 5172
5173 u8 reserved_3[0x20]; 5173 u8 reserved_at_60[0x20];
5174}; 5174};
5175 5175
5176struct mlx5_ifc_destroy_cq_out_bits { 5176struct mlx5_ifc_destroy_cq_out_bits {
5177 u8 status[0x8]; 5177 u8 status[0x8];
5178 u8 reserved_0[0x18]; 5178 u8 reserved_at_8[0x18];
5179 5179
5180 u8 syndrome[0x20]; 5180 u8 syndrome[0x20];
5181 5181
5182 u8 reserved_1[0x40]; 5182 u8 reserved_at_40[0x40];
5183}; 5183};
5184 5184
5185struct mlx5_ifc_destroy_cq_in_bits { 5185struct mlx5_ifc_destroy_cq_in_bits {
5186 u8 opcode[0x10]; 5186 u8 opcode[0x10];
5187 u8 reserved_0[0x10]; 5187 u8 reserved_at_10[0x10];
5188 5188
5189 u8 reserved_1[0x10]; 5189 u8 reserved_at_20[0x10];
5190 u8 op_mod[0x10]; 5190 u8 op_mod[0x10];
5191 5191
5192 u8 reserved_2[0x8]; 5192 u8 reserved_at_40[0x8];
5193 u8 cqn[0x18]; 5193 u8 cqn[0x18];
5194 5194
5195 u8 reserved_3[0x20]; 5195 u8 reserved_at_60[0x20];
5196}; 5196};
5197 5197
5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { 5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
5199 u8 status[0x8]; 5199 u8 status[0x8];
5200 u8 reserved_0[0x18]; 5200 u8 reserved_at_8[0x18];
5201 5201
5202 u8 syndrome[0x20]; 5202 u8 syndrome[0x20];
5203 5203
5204 u8 reserved_1[0x40]; 5204 u8 reserved_at_40[0x40];
5205}; 5205};
5206 5206
5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { 5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
5208 u8 opcode[0x10]; 5208 u8 opcode[0x10];
5209 u8 reserved_0[0x10]; 5209 u8 reserved_at_10[0x10];
5210 5210
5211 u8 reserved_1[0x10]; 5211 u8 reserved_at_20[0x10];
5212 u8 op_mod[0x10]; 5212 u8 op_mod[0x10];
5213 5213
5214 u8 reserved_2[0x20]; 5214 u8 reserved_at_40[0x20];
5215 5215
5216 u8 reserved_3[0x10]; 5216 u8 reserved_at_60[0x10];
5217 u8 vxlan_udp_port[0x10]; 5217 u8 vxlan_udp_port[0x10];
5218}; 5218};
5219 5219
5220struct mlx5_ifc_delete_l2_table_entry_out_bits { 5220struct mlx5_ifc_delete_l2_table_entry_out_bits {
5221 u8 status[0x8]; 5221 u8 status[0x8];
5222 u8 reserved_0[0x18]; 5222 u8 reserved_at_8[0x18];
5223 5223
5224 u8 syndrome[0x20]; 5224 u8 syndrome[0x20];
5225 5225
5226 u8 reserved_1[0x40]; 5226 u8 reserved_at_40[0x40];
5227}; 5227};
5228 5228
5229struct mlx5_ifc_delete_l2_table_entry_in_bits { 5229struct mlx5_ifc_delete_l2_table_entry_in_bits {
5230 u8 opcode[0x10]; 5230 u8 opcode[0x10];
5231 u8 reserved_0[0x10]; 5231 u8 reserved_at_10[0x10];
5232 5232
5233 u8 reserved_1[0x10]; 5233 u8 reserved_at_20[0x10];
5234 u8 op_mod[0x10]; 5234 u8 op_mod[0x10];
5235 5235
5236 u8 reserved_2[0x60]; 5236 u8 reserved_at_40[0x60];
5237 5237
5238 u8 reserved_3[0x8]; 5238 u8 reserved_at_a0[0x8];
5239 u8 table_index[0x18]; 5239 u8 table_index[0x18];
5240 5240
5241 u8 reserved_4[0x140]; 5241 u8 reserved_at_c0[0x140];
5242}; 5242};
5243 5243
5244struct mlx5_ifc_delete_fte_out_bits { 5244struct mlx5_ifc_delete_fte_out_bits {
5245 u8 status[0x8]; 5245 u8 status[0x8];
5246 u8 reserved_0[0x18]; 5246 u8 reserved_at_8[0x18];
5247 5247
5248 u8 syndrome[0x20]; 5248 u8 syndrome[0x20];
5249 5249
5250 u8 reserved_1[0x40]; 5250 u8 reserved_at_40[0x40];
5251}; 5251};
5252 5252
5253struct mlx5_ifc_delete_fte_in_bits { 5253struct mlx5_ifc_delete_fte_in_bits {
5254 u8 opcode[0x10]; 5254 u8 opcode[0x10];
5255 u8 reserved_0[0x10]; 5255 u8 reserved_at_10[0x10];
5256 5256
5257 u8 reserved_1[0x10]; 5257 u8 reserved_at_20[0x10];
5258 u8 op_mod[0x10]; 5258 u8 op_mod[0x10];
5259 5259
5260 u8 reserved_2[0x40]; 5260 u8 reserved_at_40[0x40];
5261 5261
5262 u8 table_type[0x8]; 5262 u8 table_type[0x8];
5263 u8 reserved_3[0x18]; 5263 u8 reserved_at_88[0x18];
5264 5264
5265 u8 reserved_4[0x8]; 5265 u8 reserved_at_a0[0x8];
5266 u8 table_id[0x18]; 5266 u8 table_id[0x18];
5267 5267
5268 u8 reserved_5[0x40]; 5268 u8 reserved_at_c0[0x40];
5269 5269
5270 u8 flow_index[0x20]; 5270 u8 flow_index[0x20];
5271 5271
5272 u8 reserved_6[0xe0]; 5272 u8 reserved_at_120[0xe0];
5273}; 5273};
5274 5274
5275struct mlx5_ifc_dealloc_xrcd_out_bits { 5275struct mlx5_ifc_dealloc_xrcd_out_bits {
5276 u8 status[0x8]; 5276 u8 status[0x8];
5277 u8 reserved_0[0x18]; 5277 u8 reserved_at_8[0x18];
5278 5278
5279 u8 syndrome[0x20]; 5279 u8 syndrome[0x20];
5280 5280
5281 u8 reserved_1[0x40]; 5281 u8 reserved_at_40[0x40];
5282}; 5282};
5283 5283
5284struct mlx5_ifc_dealloc_xrcd_in_bits { 5284struct mlx5_ifc_dealloc_xrcd_in_bits {
5285 u8 opcode[0x10]; 5285 u8 opcode[0x10];
5286 u8 reserved_0[0x10]; 5286 u8 reserved_at_10[0x10];
5287 5287
5288 u8 reserved_1[0x10]; 5288 u8 reserved_at_20[0x10];
5289 u8 op_mod[0x10]; 5289 u8 op_mod[0x10];
5290 5290
5291 u8 reserved_2[0x8]; 5291 u8 reserved_at_40[0x8];
5292 u8 xrcd[0x18]; 5292 u8 xrcd[0x18];
5293 5293
5294 u8 reserved_3[0x20]; 5294 u8 reserved_at_60[0x20];
5295}; 5295};
5296 5296
5297struct mlx5_ifc_dealloc_uar_out_bits { 5297struct mlx5_ifc_dealloc_uar_out_bits {
5298 u8 status[0x8]; 5298 u8 status[0x8];
5299 u8 reserved_0[0x18]; 5299 u8 reserved_at_8[0x18];
5300 5300
5301 u8 syndrome[0x20]; 5301 u8 syndrome[0x20];
5302 5302
5303 u8 reserved_1[0x40]; 5303 u8 reserved_at_40[0x40];
5304}; 5304};
5305 5305
5306struct mlx5_ifc_dealloc_uar_in_bits { 5306struct mlx5_ifc_dealloc_uar_in_bits {
5307 u8 opcode[0x10]; 5307 u8 opcode[0x10];
5308 u8 reserved_0[0x10]; 5308 u8 reserved_at_10[0x10];
5309 5309
5310 u8 reserved_1[0x10]; 5310 u8 reserved_at_20[0x10];
5311 u8 op_mod[0x10]; 5311 u8 op_mod[0x10];
5312 5312
5313 u8 reserved_2[0x8]; 5313 u8 reserved_at_40[0x8];
5314 u8 uar[0x18]; 5314 u8 uar[0x18];
5315 5315
5316 u8 reserved_3[0x20]; 5316 u8 reserved_at_60[0x20];
5317}; 5317};
5318 5318
5319struct mlx5_ifc_dealloc_transport_domain_out_bits { 5319struct mlx5_ifc_dealloc_transport_domain_out_bits {
5320 u8 status[0x8]; 5320 u8 status[0x8];
5321 u8 reserved_0[0x18]; 5321 u8 reserved_at_8[0x18];
5322 5322
5323 u8 syndrome[0x20]; 5323 u8 syndrome[0x20];
5324 5324
5325 u8 reserved_1[0x40]; 5325 u8 reserved_at_40[0x40];
5326}; 5326};
5327 5327
5328struct mlx5_ifc_dealloc_transport_domain_in_bits { 5328struct mlx5_ifc_dealloc_transport_domain_in_bits {
5329 u8 opcode[0x10]; 5329 u8 opcode[0x10];
5330 u8 reserved_0[0x10]; 5330 u8 reserved_at_10[0x10];
5331 5331
5332 u8 reserved_1[0x10]; 5332 u8 reserved_at_20[0x10];
5333 u8 op_mod[0x10]; 5333 u8 op_mod[0x10];
5334 5334
5335 u8 reserved_2[0x8]; 5335 u8 reserved_at_40[0x8];
5336 u8 transport_domain[0x18]; 5336 u8 transport_domain[0x18];
5337 5337
5338 u8 reserved_3[0x20]; 5338 u8 reserved_at_60[0x20];
5339}; 5339};
5340 5340
5341struct mlx5_ifc_dealloc_q_counter_out_bits { 5341struct mlx5_ifc_dealloc_q_counter_out_bits {
5342 u8 status[0x8]; 5342 u8 status[0x8];
5343 u8 reserved_0[0x18]; 5343 u8 reserved_at_8[0x18];
5344 5344
5345 u8 syndrome[0x20]; 5345 u8 syndrome[0x20];
5346 5346
5347 u8 reserved_1[0x40]; 5347 u8 reserved_at_40[0x40];
5348}; 5348};
5349 5349
5350struct mlx5_ifc_dealloc_q_counter_in_bits { 5350struct mlx5_ifc_dealloc_q_counter_in_bits {
5351 u8 opcode[0x10]; 5351 u8 opcode[0x10];
5352 u8 reserved_0[0x10]; 5352 u8 reserved_at_10[0x10];
5353 5353
5354 u8 reserved_1[0x10]; 5354 u8 reserved_at_20[0x10];
5355 u8 op_mod[0x10]; 5355 u8 op_mod[0x10];
5356 5356
5357 u8 reserved_2[0x18]; 5357 u8 reserved_at_40[0x18];
5358 u8 counter_set_id[0x8]; 5358 u8 counter_set_id[0x8];
5359 5359
5360 u8 reserved_3[0x20]; 5360 u8 reserved_at_60[0x20];
5361}; 5361};
5362 5362
5363struct mlx5_ifc_dealloc_pd_out_bits { 5363struct mlx5_ifc_dealloc_pd_out_bits {
5364 u8 status[0x8]; 5364 u8 status[0x8];
5365 u8 reserved_0[0x18]; 5365 u8 reserved_at_8[0x18];
5366 5366
5367 u8 syndrome[0x20]; 5367 u8 syndrome[0x20];
5368 5368
5369 u8 reserved_1[0x40]; 5369 u8 reserved_at_40[0x40];
5370}; 5370};
5371 5371
5372struct mlx5_ifc_dealloc_pd_in_bits { 5372struct mlx5_ifc_dealloc_pd_in_bits {
5373 u8 opcode[0x10]; 5373 u8 opcode[0x10];
5374 u8 reserved_0[0x10]; 5374 u8 reserved_at_10[0x10];
5375 5375
5376 u8 reserved_1[0x10]; 5376 u8 reserved_at_20[0x10];
5377 u8 op_mod[0x10]; 5377 u8 op_mod[0x10];
5378 5378
5379 u8 reserved_2[0x8]; 5379 u8 reserved_at_40[0x8];
5380 u8 pd[0x18]; 5380 u8 pd[0x18];
5381 5381
5382 u8 reserved_3[0x20]; 5382 u8 reserved_at_60[0x20];
5383}; 5383};
5384 5384
5385struct mlx5_ifc_create_xrc_srq_out_bits { 5385struct mlx5_ifc_create_xrc_srq_out_bits {
5386 u8 status[0x8]; 5386 u8 status[0x8];
5387 u8 reserved_0[0x18]; 5387 u8 reserved_at_8[0x18];
5388 5388
5389 u8 syndrome[0x20]; 5389 u8 syndrome[0x20];
5390 5390
5391 u8 reserved_1[0x8]; 5391 u8 reserved_at_40[0x8];
5392 u8 xrc_srqn[0x18]; 5392 u8 xrc_srqn[0x18];
5393 5393
5394 u8 reserved_2[0x20]; 5394 u8 reserved_at_60[0x20];
5395}; 5395};
5396 5396
5397struct mlx5_ifc_create_xrc_srq_in_bits { 5397struct mlx5_ifc_create_xrc_srq_in_bits {
5398 u8 opcode[0x10]; 5398 u8 opcode[0x10];
5399 u8 reserved_0[0x10]; 5399 u8 reserved_at_10[0x10];
5400 5400
5401 u8 reserved_1[0x10]; 5401 u8 reserved_at_20[0x10];
5402 u8 op_mod[0x10]; 5402 u8 op_mod[0x10];
5403 5403
5404 u8 reserved_2[0x40]; 5404 u8 reserved_at_40[0x40];
5405 5405
5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
5407 5407
5408 u8 reserved_3[0x600]; 5408 u8 reserved_at_280[0x600];
5409 5409
5410 u8 pas[0][0x40]; 5410 u8 pas[0][0x40];
5411}; 5411};
5412 5412
5413struct mlx5_ifc_create_tis_out_bits { 5413struct mlx5_ifc_create_tis_out_bits {
5414 u8 status[0x8]; 5414 u8 status[0x8];
5415 u8 reserved_0[0x18]; 5415 u8 reserved_at_8[0x18];
5416 5416
5417 u8 syndrome[0x20]; 5417 u8 syndrome[0x20];
5418 5418
5419 u8 reserved_1[0x8]; 5419 u8 reserved_at_40[0x8];
5420 u8 tisn[0x18]; 5420 u8 tisn[0x18];
5421 5421
5422 u8 reserved_2[0x20]; 5422 u8 reserved_at_60[0x20];
5423}; 5423};
5424 5424
5425struct mlx5_ifc_create_tis_in_bits { 5425struct mlx5_ifc_create_tis_in_bits {
5426 u8 opcode[0x10]; 5426 u8 opcode[0x10];
5427 u8 reserved_0[0x10]; 5427 u8 reserved_at_10[0x10];
5428 5428
5429 u8 reserved_1[0x10]; 5429 u8 reserved_at_20[0x10];
5430 u8 op_mod[0x10]; 5430 u8 op_mod[0x10];
5431 5431
5432 u8 reserved_2[0xc0]; 5432 u8 reserved_at_40[0xc0];
5433 5433
5434 struct mlx5_ifc_tisc_bits ctx; 5434 struct mlx5_ifc_tisc_bits ctx;
5435}; 5435};
5436 5436
5437struct mlx5_ifc_create_tir_out_bits { 5437struct mlx5_ifc_create_tir_out_bits {
5438 u8 status[0x8]; 5438 u8 status[0x8];
5439 u8 reserved_0[0x18]; 5439 u8 reserved_at_8[0x18];
5440 5440
5441 u8 syndrome[0x20]; 5441 u8 syndrome[0x20];
5442 5442
5443 u8 reserved_1[0x8]; 5443 u8 reserved_at_40[0x8];
5444 u8 tirn[0x18]; 5444 u8 tirn[0x18];
5445 5445
5446 u8 reserved_2[0x20]; 5446 u8 reserved_at_60[0x20];
5447}; 5447};
5448 5448
5449struct mlx5_ifc_create_tir_in_bits { 5449struct mlx5_ifc_create_tir_in_bits {
5450 u8 opcode[0x10]; 5450 u8 opcode[0x10];
5451 u8 reserved_0[0x10]; 5451 u8 reserved_at_10[0x10];
5452 5452
5453 u8 reserved_1[0x10]; 5453 u8 reserved_at_20[0x10];
5454 u8 op_mod[0x10]; 5454 u8 op_mod[0x10];
5455 5455
5456 u8 reserved_2[0xc0]; 5456 u8 reserved_at_40[0xc0];
5457 5457
5458 struct mlx5_ifc_tirc_bits ctx; 5458 struct mlx5_ifc_tirc_bits ctx;
5459}; 5459};
5460 5460
5461struct mlx5_ifc_create_srq_out_bits { 5461struct mlx5_ifc_create_srq_out_bits {
5462 u8 status[0x8]; 5462 u8 status[0x8];
5463 u8 reserved_0[0x18]; 5463 u8 reserved_at_8[0x18];
5464 5464
5465 u8 syndrome[0x20]; 5465 u8 syndrome[0x20];
5466 5466
5467 u8 reserved_1[0x8]; 5467 u8 reserved_at_40[0x8];
5468 u8 srqn[0x18]; 5468 u8 srqn[0x18];
5469 5469
5470 u8 reserved_2[0x20]; 5470 u8 reserved_at_60[0x20];
5471}; 5471};
5472 5472
5473struct mlx5_ifc_create_srq_in_bits { 5473struct mlx5_ifc_create_srq_in_bits {
5474 u8 opcode[0x10]; 5474 u8 opcode[0x10];
5475 u8 reserved_0[0x10]; 5475 u8 reserved_at_10[0x10];
5476 5476
5477 u8 reserved_1[0x10]; 5477 u8 reserved_at_20[0x10];
5478 u8 op_mod[0x10]; 5478 u8 op_mod[0x10];
5479 5479
5480 u8 reserved_2[0x40]; 5480 u8 reserved_at_40[0x40];
5481 5481
5482 struct mlx5_ifc_srqc_bits srq_context_entry; 5482 struct mlx5_ifc_srqc_bits srq_context_entry;
5483 5483
5484 u8 reserved_3[0x600]; 5484 u8 reserved_at_280[0x600];
5485 5485
5486 u8 pas[0][0x40]; 5486 u8 pas[0][0x40];
5487}; 5487};
5488 5488
5489struct mlx5_ifc_create_sq_out_bits { 5489struct mlx5_ifc_create_sq_out_bits {
5490 u8 status[0x8]; 5490 u8 status[0x8];
5491 u8 reserved_0[0x18]; 5491 u8 reserved_at_8[0x18];
5492 5492
5493 u8 syndrome[0x20]; 5493 u8 syndrome[0x20];
5494 5494
5495 u8 reserved_1[0x8]; 5495 u8 reserved_at_40[0x8];
5496 u8 sqn[0x18]; 5496 u8 sqn[0x18];
5497 5497
5498 u8 reserved_2[0x20]; 5498 u8 reserved_at_60[0x20];
5499}; 5499};
5500 5500
5501struct mlx5_ifc_create_sq_in_bits { 5501struct mlx5_ifc_create_sq_in_bits {
5502 u8 opcode[0x10]; 5502 u8 opcode[0x10];
5503 u8 reserved_0[0x10]; 5503 u8 reserved_at_10[0x10];
5504 5504
5505 u8 reserved_1[0x10]; 5505 u8 reserved_at_20[0x10];
5506 u8 op_mod[0x10]; 5506 u8 op_mod[0x10];
5507 5507
5508 u8 reserved_2[0xc0]; 5508 u8 reserved_at_40[0xc0];
5509 5509
5510 struct mlx5_ifc_sqc_bits ctx; 5510 struct mlx5_ifc_sqc_bits ctx;
5511}; 5511};
5512 5512
5513struct mlx5_ifc_create_rqt_out_bits { 5513struct mlx5_ifc_create_rqt_out_bits {
5514 u8 status[0x8]; 5514 u8 status[0x8];
5515 u8 reserved_0[0x18]; 5515 u8 reserved_at_8[0x18];
5516 5516
5517 u8 syndrome[0x20]; 5517 u8 syndrome[0x20];
5518 5518
5519 u8 reserved_1[0x8]; 5519 u8 reserved_at_40[0x8];
5520 u8 rqtn[0x18]; 5520 u8 rqtn[0x18];
5521 5521
5522 u8 reserved_2[0x20]; 5522 u8 reserved_at_60[0x20];
5523}; 5523};
5524 5524
5525struct mlx5_ifc_create_rqt_in_bits { 5525struct mlx5_ifc_create_rqt_in_bits {
5526 u8 opcode[0x10]; 5526 u8 opcode[0x10];
5527 u8 reserved_0[0x10]; 5527 u8 reserved_at_10[0x10];
5528 5528
5529 u8 reserved_1[0x10]; 5529 u8 reserved_at_20[0x10];
5530 u8 op_mod[0x10]; 5530 u8 op_mod[0x10];
5531 5531
5532 u8 reserved_2[0xc0]; 5532 u8 reserved_at_40[0xc0];
5533 5533
5534 struct mlx5_ifc_rqtc_bits rqt_context; 5534 struct mlx5_ifc_rqtc_bits rqt_context;
5535}; 5535};
5536 5536
5537struct mlx5_ifc_create_rq_out_bits { 5537struct mlx5_ifc_create_rq_out_bits {
5538 u8 status[0x8]; 5538 u8 status[0x8];
5539 u8 reserved_0[0x18]; 5539 u8 reserved_at_8[0x18];
5540 5540
5541 u8 syndrome[0x20]; 5541 u8 syndrome[0x20];
5542 5542
5543 u8 reserved_1[0x8]; 5543 u8 reserved_at_40[0x8];
5544 u8 rqn[0x18]; 5544 u8 rqn[0x18];
5545 5545
5546 u8 reserved_2[0x20]; 5546 u8 reserved_at_60[0x20];
5547}; 5547};
5548 5548
5549struct mlx5_ifc_create_rq_in_bits { 5549struct mlx5_ifc_create_rq_in_bits {
5550 u8 opcode[0x10]; 5550 u8 opcode[0x10];
5551 u8 reserved_0[0x10]; 5551 u8 reserved_at_10[0x10];
5552 5552
5553 u8 reserved_1[0x10]; 5553 u8 reserved_at_20[0x10];
5554 u8 op_mod[0x10]; 5554 u8 op_mod[0x10];
5555 5555
5556 u8 reserved_2[0xc0]; 5556 u8 reserved_at_40[0xc0];
5557 5557
5558 struct mlx5_ifc_rqc_bits ctx; 5558 struct mlx5_ifc_rqc_bits ctx;
5559}; 5559};
5560 5560
5561struct mlx5_ifc_create_rmp_out_bits { 5561struct mlx5_ifc_create_rmp_out_bits {
5562 u8 status[0x8]; 5562 u8 status[0x8];
5563 u8 reserved_0[0x18]; 5563 u8 reserved_at_8[0x18];
5564 5564
5565 u8 syndrome[0x20]; 5565 u8 syndrome[0x20];
5566 5566
5567 u8 reserved_1[0x8]; 5567 u8 reserved_at_40[0x8];
5568 u8 rmpn[0x18]; 5568 u8 rmpn[0x18];
5569 5569
5570 u8 reserved_2[0x20]; 5570 u8 reserved_at_60[0x20];
5571}; 5571};
5572 5572
5573struct mlx5_ifc_create_rmp_in_bits { 5573struct mlx5_ifc_create_rmp_in_bits {
5574 u8 opcode[0x10]; 5574 u8 opcode[0x10];
5575 u8 reserved_0[0x10]; 5575 u8 reserved_at_10[0x10];
5576 5576
5577 u8 reserved_1[0x10]; 5577 u8 reserved_at_20[0x10];
5578 u8 op_mod[0x10]; 5578 u8 op_mod[0x10];
5579 5579
5580 u8 reserved_2[0xc0]; 5580 u8 reserved_at_40[0xc0];
5581 5581
5582 struct mlx5_ifc_rmpc_bits ctx; 5582 struct mlx5_ifc_rmpc_bits ctx;
5583}; 5583};
5584 5584
5585struct mlx5_ifc_create_qp_out_bits { 5585struct mlx5_ifc_create_qp_out_bits {
5586 u8 status[0x8]; 5586 u8 status[0x8];
5587 u8 reserved_0[0x18]; 5587 u8 reserved_at_8[0x18];
5588 5588
5589 u8 syndrome[0x20]; 5589 u8 syndrome[0x20];
5590 5590
5591 u8 reserved_1[0x8]; 5591 u8 reserved_at_40[0x8];
5592 u8 qpn[0x18]; 5592 u8 qpn[0x18];
5593 5593
5594 u8 reserved_2[0x20]; 5594 u8 reserved_at_60[0x20];
5595}; 5595};
5596 5596
5597struct mlx5_ifc_create_qp_in_bits { 5597struct mlx5_ifc_create_qp_in_bits {
5598 u8 opcode[0x10]; 5598 u8 opcode[0x10];
5599 u8 reserved_0[0x10]; 5599 u8 reserved_at_10[0x10];
5600 5600
5601 u8 reserved_1[0x10]; 5601 u8 reserved_at_20[0x10];
5602 u8 op_mod[0x10]; 5602 u8 op_mod[0x10];
5603 5603
5604 u8 reserved_2[0x40]; 5604 u8 reserved_at_40[0x40];
5605 5605
5606 u8 opt_param_mask[0x20]; 5606 u8 opt_param_mask[0x20];
5607 5607
5608 u8 reserved_3[0x20]; 5608 u8 reserved_at_a0[0x20];
5609 5609
5610 struct mlx5_ifc_qpc_bits qpc; 5610 struct mlx5_ifc_qpc_bits qpc;
5611 5611
5612 u8 reserved_4[0x80]; 5612 u8 reserved_at_800[0x80];
5613 5613
5614 u8 pas[0][0x40]; 5614 u8 pas[0][0x40];
5615}; 5615};
5616 5616
5617struct mlx5_ifc_create_psv_out_bits { 5617struct mlx5_ifc_create_psv_out_bits {
5618 u8 status[0x8]; 5618 u8 status[0x8];
5619 u8 reserved_0[0x18]; 5619 u8 reserved_at_8[0x18];
5620 5620
5621 u8 syndrome[0x20]; 5621 u8 syndrome[0x20];
5622 5622
5623 u8 reserved_1[0x40]; 5623 u8 reserved_at_40[0x40];
5624 5624
5625 u8 reserved_2[0x8]; 5625 u8 reserved_at_80[0x8];
5626 u8 psv0_index[0x18]; 5626 u8 psv0_index[0x18];
5627 5627
5628 u8 reserved_3[0x8]; 5628 u8 reserved_at_a0[0x8];
5629 u8 psv1_index[0x18]; 5629 u8 psv1_index[0x18];
5630 5630
5631 u8 reserved_4[0x8]; 5631 u8 reserved_at_c0[0x8];
5632 u8 psv2_index[0x18]; 5632 u8 psv2_index[0x18];
5633 5633
5634 u8 reserved_5[0x8]; 5634 u8 reserved_at_e0[0x8];
5635 u8 psv3_index[0x18]; 5635 u8 psv3_index[0x18];
5636}; 5636};
5637 5637
5638struct mlx5_ifc_create_psv_in_bits { 5638struct mlx5_ifc_create_psv_in_bits {
5639 u8 opcode[0x10]; 5639 u8 opcode[0x10];
5640 u8 reserved_0[0x10]; 5640 u8 reserved_at_10[0x10];
5641 5641
5642 u8 reserved_1[0x10]; 5642 u8 reserved_at_20[0x10];
5643 u8 op_mod[0x10]; 5643 u8 op_mod[0x10];
5644 5644
5645 u8 num_psv[0x4]; 5645 u8 num_psv[0x4];
5646 u8 reserved_2[0x4]; 5646 u8 reserved_at_44[0x4];
5647 u8 pd[0x18]; 5647 u8 pd[0x18];
5648 5648
5649 u8 reserved_3[0x20]; 5649 u8 reserved_at_60[0x20];
5650}; 5650};
5651 5651
5652struct mlx5_ifc_create_mkey_out_bits { 5652struct mlx5_ifc_create_mkey_out_bits {
5653 u8 status[0x8]; 5653 u8 status[0x8];
5654 u8 reserved_0[0x18]; 5654 u8 reserved_at_8[0x18];
5655 5655
5656 u8 syndrome[0x20]; 5656 u8 syndrome[0x20];
5657 5657
5658 u8 reserved_1[0x8]; 5658 u8 reserved_at_40[0x8];
5659 u8 mkey_index[0x18]; 5659 u8 mkey_index[0x18];
5660 5660
5661 u8 reserved_2[0x20]; 5661 u8 reserved_at_60[0x20];
5662}; 5662};
5663 5663
5664struct mlx5_ifc_create_mkey_in_bits { 5664struct mlx5_ifc_create_mkey_in_bits {
5665 u8 opcode[0x10]; 5665 u8 opcode[0x10];
5666 u8 reserved_0[0x10]; 5666 u8 reserved_at_10[0x10];
5667 5667
5668 u8 reserved_1[0x10]; 5668 u8 reserved_at_20[0x10];
5669 u8 op_mod[0x10]; 5669 u8 op_mod[0x10];
5670 5670
5671 u8 reserved_2[0x20]; 5671 u8 reserved_at_40[0x20];
5672 5672
5673 u8 pg_access[0x1]; 5673 u8 pg_access[0x1];
5674 u8 reserved_3[0x1f]; 5674 u8 reserved_at_61[0x1f];
5675 5675
5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
5677 5677
5678 u8 reserved_4[0x80]; 5678 u8 reserved_at_280[0x80];
5679 5679
5680 u8 translations_octword_actual_size[0x20]; 5680 u8 translations_octword_actual_size[0x20];
5681 5681
5682 u8 reserved_5[0x560]; 5682 u8 reserved_at_320[0x560];
5683 5683
5684 u8 klm_pas_mtt[0][0x20]; 5684 u8 klm_pas_mtt[0][0x20];
5685}; 5685};
5686 5686
5687struct mlx5_ifc_create_flow_table_out_bits { 5687struct mlx5_ifc_create_flow_table_out_bits {
5688 u8 status[0x8]; 5688 u8 status[0x8];
5689 u8 reserved_0[0x18]; 5689 u8 reserved_at_8[0x18];
5690 5690
5691 u8 syndrome[0x20]; 5691 u8 syndrome[0x20];
5692 5692
5693 u8 reserved_1[0x8]; 5693 u8 reserved_at_40[0x8];
5694 u8 table_id[0x18]; 5694 u8 table_id[0x18];
5695 5695
5696 u8 reserved_2[0x20]; 5696 u8 reserved_at_60[0x20];
5697}; 5697};
5698 5698
5699struct mlx5_ifc_create_flow_table_in_bits { 5699struct mlx5_ifc_create_flow_table_in_bits {
5700 u8 opcode[0x10]; 5700 u8 opcode[0x10];
5701 u8 reserved_0[0x10]; 5701 u8 reserved_at_10[0x10];
5702 5702
5703 u8 reserved_1[0x10]; 5703 u8 reserved_at_20[0x10];
5704 u8 op_mod[0x10]; 5704 u8 op_mod[0x10];
5705 5705
5706 u8 reserved_2[0x40]; 5706 u8 reserved_at_40[0x40];
5707 5707
5708 u8 table_type[0x8]; 5708 u8 table_type[0x8];
5709 u8 reserved_3[0x18]; 5709 u8 reserved_at_88[0x18];
5710 5710
5711 u8 reserved_4[0x20]; 5711 u8 reserved_at_a0[0x20];
5712 5712
5713 u8 reserved_5[0x4]; 5713 u8 reserved_at_c0[0x4];
5714 u8 table_miss_mode[0x4]; 5714 u8 table_miss_mode[0x4];
5715 u8 level[0x8]; 5715 u8 level[0x8];
5716 u8 reserved_6[0x8]; 5716 u8 reserved_at_d0[0x8];
5717 u8 log_size[0x8]; 5717 u8 log_size[0x8];
5718 5718
5719 u8 reserved_7[0x8]; 5719 u8 reserved_at_e0[0x8];
5720 u8 table_miss_id[0x18]; 5720 u8 table_miss_id[0x18];
5721 5721
5722 u8 reserved_8[0x100]; 5722 u8 reserved_at_100[0x100];
5723}; 5723};
5724 5724
5725struct mlx5_ifc_create_flow_group_out_bits { 5725struct mlx5_ifc_create_flow_group_out_bits {
5726 u8 status[0x8]; 5726 u8 status[0x8];
5727 u8 reserved_0[0x18]; 5727 u8 reserved_at_8[0x18];
5728 5728
5729 u8 syndrome[0x20]; 5729 u8 syndrome[0x20];
5730 5730
5731 u8 reserved_1[0x8]; 5731 u8 reserved_at_40[0x8];
5732 u8 group_id[0x18]; 5732 u8 group_id[0x18];
5733 5733
5734 u8 reserved_2[0x20]; 5734 u8 reserved_at_60[0x20];
5735}; 5735};
5736 5736
5737enum { 5737enum {
@@ -5742,134 +5742,134 @@ enum {
5742 5742
5743struct mlx5_ifc_create_flow_group_in_bits { 5743struct mlx5_ifc_create_flow_group_in_bits {
5744 u8 opcode[0x10]; 5744 u8 opcode[0x10];
5745 u8 reserved_0[0x10]; 5745 u8 reserved_at_10[0x10];
5746 5746
5747 u8 reserved_1[0x10]; 5747 u8 reserved_at_20[0x10];
5748 u8 op_mod[0x10]; 5748 u8 op_mod[0x10];
5749 5749
5750 u8 reserved_2[0x40]; 5750 u8 reserved_at_40[0x40];
5751 5751
5752 u8 table_type[0x8]; 5752 u8 table_type[0x8];
5753 u8 reserved_3[0x18]; 5753 u8 reserved_at_88[0x18];
5754 5754
5755 u8 reserved_4[0x8]; 5755 u8 reserved_at_a0[0x8];
5756 u8 table_id[0x18]; 5756 u8 table_id[0x18];
5757 5757
5758 u8 reserved_5[0x20]; 5758 u8 reserved_at_c0[0x20];
5759 5759
5760 u8 start_flow_index[0x20]; 5760 u8 start_flow_index[0x20];
5761 5761
5762 u8 reserved_6[0x20]; 5762 u8 reserved_at_100[0x20];
5763 5763
5764 u8 end_flow_index[0x20]; 5764 u8 end_flow_index[0x20];
5765 5765
5766 u8 reserved_7[0xa0]; 5766 u8 reserved_at_140[0xa0];
5767 5767
5768 u8 reserved_8[0x18]; 5768 u8 reserved_at_1e0[0x18];
5769 u8 match_criteria_enable[0x8]; 5769 u8 match_criteria_enable[0x8];
5770 5770
5771 struct mlx5_ifc_fte_match_param_bits match_criteria; 5771 struct mlx5_ifc_fte_match_param_bits match_criteria;
5772 5772
5773 u8 reserved_9[0xe00]; 5773 u8 reserved_at_1200[0xe00];
5774}; 5774};
5775 5775
5776struct mlx5_ifc_create_eq_out_bits { 5776struct mlx5_ifc_create_eq_out_bits {
5777 u8 status[0x8]; 5777 u8 status[0x8];
5778 u8 reserved_0[0x18]; 5778 u8 reserved_at_8[0x18];
5779 5779
5780 u8 syndrome[0x20]; 5780 u8 syndrome[0x20];
5781 5781
5782 u8 reserved_1[0x18]; 5782 u8 reserved_at_40[0x18];
5783 u8 eq_number[0x8]; 5783 u8 eq_number[0x8];
5784 5784
5785 u8 reserved_2[0x20]; 5785 u8 reserved_at_60[0x20];
5786}; 5786};
5787 5787
5788struct mlx5_ifc_create_eq_in_bits { 5788struct mlx5_ifc_create_eq_in_bits {
5789 u8 opcode[0x10]; 5789 u8 opcode[0x10];
5790 u8 reserved_0[0x10]; 5790 u8 reserved_at_10[0x10];
5791 5791
5792 u8 reserved_1[0x10]; 5792 u8 reserved_at_20[0x10];
5793 u8 op_mod[0x10]; 5793 u8 op_mod[0x10];
5794 5794
5795 u8 reserved_2[0x40]; 5795 u8 reserved_at_40[0x40];
5796 5796
5797 struct mlx5_ifc_eqc_bits eq_context_entry; 5797 struct mlx5_ifc_eqc_bits eq_context_entry;
5798 5798
5799 u8 reserved_3[0x40]; 5799 u8 reserved_at_280[0x40];
5800 5800
5801 u8 event_bitmask[0x40]; 5801 u8 event_bitmask[0x40];
5802 5802
5803 u8 reserved_4[0x580]; 5803 u8 reserved_at_300[0x580];
5804 5804
5805 u8 pas[0][0x40]; 5805 u8 pas[0][0x40];
5806}; 5806};
5807 5807
5808struct mlx5_ifc_create_dct_out_bits { 5808struct mlx5_ifc_create_dct_out_bits {
5809 u8 status[0x8]; 5809 u8 status[0x8];
5810 u8 reserved_0[0x18]; 5810 u8 reserved_at_8[0x18];
5811 5811
5812 u8 syndrome[0x20]; 5812 u8 syndrome[0x20];
5813 5813
5814 u8 reserved_1[0x8]; 5814 u8 reserved_at_40[0x8];
5815 u8 dctn[0x18]; 5815 u8 dctn[0x18];
5816 5816
5817 u8 reserved_2[0x20]; 5817 u8 reserved_at_60[0x20];
5818}; 5818};
5819 5819
5820struct mlx5_ifc_create_dct_in_bits { 5820struct mlx5_ifc_create_dct_in_bits {
5821 u8 opcode[0x10]; 5821 u8 opcode[0x10];
5822 u8 reserved_0[0x10]; 5822 u8 reserved_at_10[0x10];
5823 5823
5824 u8 reserved_1[0x10]; 5824 u8 reserved_at_20[0x10];
5825 u8 op_mod[0x10]; 5825 u8 op_mod[0x10];
5826 5826
5827 u8 reserved_2[0x40]; 5827 u8 reserved_at_40[0x40];
5828 5828
5829 struct mlx5_ifc_dctc_bits dct_context_entry; 5829 struct mlx5_ifc_dctc_bits dct_context_entry;
5830 5830
5831 u8 reserved_3[0x180]; 5831 u8 reserved_at_280[0x180];
5832}; 5832};
5833 5833
5834struct mlx5_ifc_create_cq_out_bits { 5834struct mlx5_ifc_create_cq_out_bits {
5835 u8 status[0x8]; 5835 u8 status[0x8];
5836 u8 reserved_0[0x18]; 5836 u8 reserved_at_8[0x18];
5837 5837
5838 u8 syndrome[0x20]; 5838 u8 syndrome[0x20];
5839 5839
5840 u8 reserved_1[0x8]; 5840 u8 reserved_at_40[0x8];
5841 u8 cqn[0x18]; 5841 u8 cqn[0x18];
5842 5842
5843 u8 reserved_2[0x20]; 5843 u8 reserved_at_60[0x20];
5844}; 5844};
5845 5845
5846struct mlx5_ifc_create_cq_in_bits { 5846struct mlx5_ifc_create_cq_in_bits {
5847 u8 opcode[0x10]; 5847 u8 opcode[0x10];
5848 u8 reserved_0[0x10]; 5848 u8 reserved_at_10[0x10];
5849 5849
5850 u8 reserved_1[0x10]; 5850 u8 reserved_at_20[0x10];
5851 u8 op_mod[0x10]; 5851 u8 op_mod[0x10];
5852 5852
5853 u8 reserved_2[0x40]; 5853 u8 reserved_at_40[0x40];
5854 5854
5855 struct mlx5_ifc_cqc_bits cq_context; 5855 struct mlx5_ifc_cqc_bits cq_context;
5856 5856
5857 u8 reserved_3[0x600]; 5857 u8 reserved_at_280[0x600];
5858 5858
5859 u8 pas[0][0x40]; 5859 u8 pas[0][0x40];
5860}; 5860};
5861 5861
5862struct mlx5_ifc_config_int_moderation_out_bits { 5862struct mlx5_ifc_config_int_moderation_out_bits {
5863 u8 status[0x8]; 5863 u8 status[0x8];
5864 u8 reserved_0[0x18]; 5864 u8 reserved_at_8[0x18];
5865 5865
5866 u8 syndrome[0x20]; 5866 u8 syndrome[0x20];
5867 5867
5868 u8 reserved_1[0x4]; 5868 u8 reserved_at_40[0x4];
5869 u8 min_delay[0xc]; 5869 u8 min_delay[0xc];
5870 u8 int_vector[0x10]; 5870 u8 int_vector[0x10];
5871 5871
5872 u8 reserved_2[0x20]; 5872 u8 reserved_at_60[0x20];
5873}; 5873};
5874 5874
5875enum { 5875enum {
@@ -5879,49 +5879,49 @@ enum {
5879 5879
5880struct mlx5_ifc_config_int_moderation_in_bits { 5880struct mlx5_ifc_config_int_moderation_in_bits {
5881 u8 opcode[0x10]; 5881 u8 opcode[0x10];
5882 u8 reserved_0[0x10]; 5882 u8 reserved_at_10[0x10];
5883 5883
5884 u8 reserved_1[0x10]; 5884 u8 reserved_at_20[0x10];
5885 u8 op_mod[0x10]; 5885 u8 op_mod[0x10];
5886 5886
5887 u8 reserved_2[0x4]; 5887 u8 reserved_at_40[0x4];
5888 u8 min_delay[0xc]; 5888 u8 min_delay[0xc];
5889 u8 int_vector[0x10]; 5889 u8 int_vector[0x10];
5890 5890
5891 u8 reserved_3[0x20]; 5891 u8 reserved_at_60[0x20];
5892}; 5892};
5893 5893
5894struct mlx5_ifc_attach_to_mcg_out_bits { 5894struct mlx5_ifc_attach_to_mcg_out_bits {
5895 u8 status[0x8]; 5895 u8 status[0x8];
5896 u8 reserved_0[0x18]; 5896 u8 reserved_at_8[0x18];
5897 5897
5898 u8 syndrome[0x20]; 5898 u8 syndrome[0x20];
5899 5899
5900 u8 reserved_1[0x40]; 5900 u8 reserved_at_40[0x40];
5901}; 5901};
5902 5902
5903struct mlx5_ifc_attach_to_mcg_in_bits { 5903struct mlx5_ifc_attach_to_mcg_in_bits {
5904 u8 opcode[0x10]; 5904 u8 opcode[0x10];
5905 u8 reserved_0[0x10]; 5905 u8 reserved_at_10[0x10];
5906 5906
5907 u8 reserved_1[0x10]; 5907 u8 reserved_at_20[0x10];
5908 u8 op_mod[0x10]; 5908 u8 op_mod[0x10];
5909 5909
5910 u8 reserved_2[0x8]; 5910 u8 reserved_at_40[0x8];
5911 u8 qpn[0x18]; 5911 u8 qpn[0x18];
5912 5912
5913 u8 reserved_3[0x20]; 5913 u8 reserved_at_60[0x20];
5914 5914
5915 u8 multicast_gid[16][0x8]; 5915 u8 multicast_gid[16][0x8];
5916}; 5916};
5917 5917
5918struct mlx5_ifc_arm_xrc_srq_out_bits { 5918struct mlx5_ifc_arm_xrc_srq_out_bits {
5919 u8 status[0x8]; 5919 u8 status[0x8];
5920 u8 reserved_0[0x18]; 5920 u8 reserved_at_8[0x18];
5921 5921
5922 u8 syndrome[0x20]; 5922 u8 syndrome[0x20];
5923 5923
5924 u8 reserved_1[0x40]; 5924 u8 reserved_at_40[0x40];
5925}; 5925};
5926 5926
5927enum { 5927enum {
@@ -5930,25 +5930,25 @@ enum {
5930 5930
5931struct mlx5_ifc_arm_xrc_srq_in_bits { 5931struct mlx5_ifc_arm_xrc_srq_in_bits {
5932 u8 opcode[0x10]; 5932 u8 opcode[0x10];
5933 u8 reserved_0[0x10]; 5933 u8 reserved_at_10[0x10];
5934 5934
5935 u8 reserved_1[0x10]; 5935 u8 reserved_at_20[0x10];
5936 u8 op_mod[0x10]; 5936 u8 op_mod[0x10];
5937 5937
5938 u8 reserved_2[0x8]; 5938 u8 reserved_at_40[0x8];
5939 u8 xrc_srqn[0x18]; 5939 u8 xrc_srqn[0x18];
5940 5940
5941 u8 reserved_3[0x10]; 5941 u8 reserved_at_60[0x10];
5942 u8 lwm[0x10]; 5942 u8 lwm[0x10];
5943}; 5943};
5944 5944
5945struct mlx5_ifc_arm_rq_out_bits { 5945struct mlx5_ifc_arm_rq_out_bits {
5946 u8 status[0x8]; 5946 u8 status[0x8];
5947 u8 reserved_0[0x18]; 5947 u8 reserved_at_8[0x18];
5948 5948
5949 u8 syndrome[0x20]; 5949 u8 syndrome[0x20];
5950 5950
5951 u8 reserved_1[0x40]; 5951 u8 reserved_at_40[0x40];
5952}; 5952};
5953 5953
5954enum { 5954enum {
@@ -5957,179 +5957,179 @@ enum {
5957 5957
5958struct mlx5_ifc_arm_rq_in_bits { 5958struct mlx5_ifc_arm_rq_in_bits {
5959 u8 opcode[0x10]; 5959 u8 opcode[0x10];
5960 u8 reserved_0[0x10]; 5960 u8 reserved_at_10[0x10];
5961 5961
5962 u8 reserved_1[0x10]; 5962 u8 reserved_at_20[0x10];
5963 u8 op_mod[0x10]; 5963 u8 op_mod[0x10];
5964 5964
5965 u8 reserved_2[0x8]; 5965 u8 reserved_at_40[0x8];
5966 u8 srq_number[0x18]; 5966 u8 srq_number[0x18];
5967 5967
5968 u8 reserved_3[0x10]; 5968 u8 reserved_at_60[0x10];
5969 u8 lwm[0x10]; 5969 u8 lwm[0x10];
5970}; 5970};
5971 5971
5972struct mlx5_ifc_arm_dct_out_bits { 5972struct mlx5_ifc_arm_dct_out_bits {
5973 u8 status[0x8]; 5973 u8 status[0x8];
5974 u8 reserved_0[0x18]; 5974 u8 reserved_at_8[0x18];
5975 5975
5976 u8 syndrome[0x20]; 5976 u8 syndrome[0x20];
5977 5977
5978 u8 reserved_1[0x40]; 5978 u8 reserved_at_40[0x40];
5979}; 5979};
5980 5980
5981struct mlx5_ifc_arm_dct_in_bits { 5981struct mlx5_ifc_arm_dct_in_bits {
5982 u8 opcode[0x10]; 5982 u8 opcode[0x10];
5983 u8 reserved_0[0x10]; 5983 u8 reserved_at_10[0x10];
5984 5984
5985 u8 reserved_1[0x10]; 5985 u8 reserved_at_20[0x10];
5986 u8 op_mod[0x10]; 5986 u8 op_mod[0x10];
5987 5987
5988 u8 reserved_2[0x8]; 5988 u8 reserved_at_40[0x8];
5989 u8 dct_number[0x18]; 5989 u8 dct_number[0x18];
5990 5990
5991 u8 reserved_3[0x20]; 5991 u8 reserved_at_60[0x20];
5992}; 5992};
5993 5993
5994struct mlx5_ifc_alloc_xrcd_out_bits { 5994struct mlx5_ifc_alloc_xrcd_out_bits {
5995 u8 status[0x8]; 5995 u8 status[0x8];
5996 u8 reserved_0[0x18]; 5996 u8 reserved_at_8[0x18];
5997 5997
5998 u8 syndrome[0x20]; 5998 u8 syndrome[0x20];
5999 5999
6000 u8 reserved_1[0x8]; 6000 u8 reserved_at_40[0x8];
6001 u8 xrcd[0x18]; 6001 u8 xrcd[0x18];
6002 6002
6003 u8 reserved_2[0x20]; 6003 u8 reserved_at_60[0x20];
6004}; 6004};
6005 6005
6006struct mlx5_ifc_alloc_xrcd_in_bits { 6006struct mlx5_ifc_alloc_xrcd_in_bits {
6007 u8 opcode[0x10]; 6007 u8 opcode[0x10];
6008 u8 reserved_0[0x10]; 6008 u8 reserved_at_10[0x10];
6009 6009
6010 u8 reserved_1[0x10]; 6010 u8 reserved_at_20[0x10];
6011 u8 op_mod[0x10]; 6011 u8 op_mod[0x10];
6012 6012
6013 u8 reserved_2[0x40]; 6013 u8 reserved_at_40[0x40];
6014}; 6014};
6015 6015
6016struct mlx5_ifc_alloc_uar_out_bits { 6016struct mlx5_ifc_alloc_uar_out_bits {
6017 u8 status[0x8]; 6017 u8 status[0x8];
6018 u8 reserved_0[0x18]; 6018 u8 reserved_at_8[0x18];
6019 6019
6020 u8 syndrome[0x20]; 6020 u8 syndrome[0x20];
6021 6021
6022 u8 reserved_1[0x8]; 6022 u8 reserved_at_40[0x8];
6023 u8 uar[0x18]; 6023 u8 uar[0x18];
6024 6024
6025 u8 reserved_2[0x20]; 6025 u8 reserved_at_60[0x20];
6026}; 6026};
6027 6027
6028struct mlx5_ifc_alloc_uar_in_bits { 6028struct mlx5_ifc_alloc_uar_in_bits {
6029 u8 opcode[0x10]; 6029 u8 opcode[0x10];
6030 u8 reserved_0[0x10]; 6030 u8 reserved_at_10[0x10];
6031 6031
6032 u8 reserved_1[0x10]; 6032 u8 reserved_at_20[0x10];
6033 u8 op_mod[0x10]; 6033 u8 op_mod[0x10];
6034 6034
6035 u8 reserved_2[0x40]; 6035 u8 reserved_at_40[0x40];
6036}; 6036};
6037 6037
6038struct mlx5_ifc_alloc_transport_domain_out_bits { 6038struct mlx5_ifc_alloc_transport_domain_out_bits {
6039 u8 status[0x8]; 6039 u8 status[0x8];
6040 u8 reserved_0[0x18]; 6040 u8 reserved_at_8[0x18];
6041 6041
6042 u8 syndrome[0x20]; 6042 u8 syndrome[0x20];
6043 6043
6044 u8 reserved_1[0x8]; 6044 u8 reserved_at_40[0x8];
6045 u8 transport_domain[0x18]; 6045 u8 transport_domain[0x18];
6046 6046
6047 u8 reserved_2[0x20]; 6047 u8 reserved_at_60[0x20];
6048}; 6048};
6049 6049
6050struct mlx5_ifc_alloc_transport_domain_in_bits { 6050struct mlx5_ifc_alloc_transport_domain_in_bits {
6051 u8 opcode[0x10]; 6051 u8 opcode[0x10];
6052 u8 reserved_0[0x10]; 6052 u8 reserved_at_10[0x10];
6053 6053
6054 u8 reserved_1[0x10]; 6054 u8 reserved_at_20[0x10];
6055 u8 op_mod[0x10]; 6055 u8 op_mod[0x10];
6056 6056
6057 u8 reserved_2[0x40]; 6057 u8 reserved_at_40[0x40];
6058}; 6058};
6059 6059
6060struct mlx5_ifc_alloc_q_counter_out_bits { 6060struct mlx5_ifc_alloc_q_counter_out_bits {
6061 u8 status[0x8]; 6061 u8 status[0x8];
6062 u8 reserved_0[0x18]; 6062 u8 reserved_at_8[0x18];
6063 6063
6064 u8 syndrome[0x20]; 6064 u8 syndrome[0x20];
6065 6065
6066 u8 reserved_1[0x18]; 6066 u8 reserved_at_40[0x18];
6067 u8 counter_set_id[0x8]; 6067 u8 counter_set_id[0x8];
6068 6068
6069 u8 reserved_2[0x20]; 6069 u8 reserved_at_60[0x20];
6070}; 6070};
6071 6071
6072struct mlx5_ifc_alloc_q_counter_in_bits { 6072struct mlx5_ifc_alloc_q_counter_in_bits {
6073 u8 opcode[0x10]; 6073 u8 opcode[0x10];
6074 u8 reserved_0[0x10]; 6074 u8 reserved_at_10[0x10];
6075 6075
6076 u8 reserved_1[0x10]; 6076 u8 reserved_at_20[0x10];
6077 u8 op_mod[0x10]; 6077 u8 op_mod[0x10];
6078 6078
6079 u8 reserved_2[0x40]; 6079 u8 reserved_at_40[0x40];
6080}; 6080};
6081 6081
6082struct mlx5_ifc_alloc_pd_out_bits { 6082struct mlx5_ifc_alloc_pd_out_bits {
6083 u8 status[0x8]; 6083 u8 status[0x8];
6084 u8 reserved_0[0x18]; 6084 u8 reserved_at_8[0x18];
6085 6085
6086 u8 syndrome[0x20]; 6086 u8 syndrome[0x20];
6087 6087
6088 u8 reserved_1[0x8]; 6088 u8 reserved_at_40[0x8];
6089 u8 pd[0x18]; 6089 u8 pd[0x18];
6090 6090
6091 u8 reserved_2[0x20]; 6091 u8 reserved_at_60[0x20];
6092}; 6092};
6093 6093
6094struct mlx5_ifc_alloc_pd_in_bits { 6094struct mlx5_ifc_alloc_pd_in_bits {
6095 u8 opcode[0x10]; 6095 u8 opcode[0x10];
6096 u8 reserved_0[0x10]; 6096 u8 reserved_at_10[0x10];
6097 6097
6098 u8 reserved_1[0x10]; 6098 u8 reserved_at_20[0x10];
6099 u8 op_mod[0x10]; 6099 u8 op_mod[0x10];
6100 6100
6101 u8 reserved_2[0x40]; 6101 u8 reserved_at_40[0x40];
6102}; 6102};
6103 6103
6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits { 6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
6105 u8 status[0x8]; 6105 u8 status[0x8];
6106 u8 reserved_0[0x18]; 6106 u8 reserved_at_8[0x18];
6107 6107
6108 u8 syndrome[0x20]; 6108 u8 syndrome[0x20];
6109 6109
6110 u8 reserved_1[0x40]; 6110 u8 reserved_at_40[0x40];
6111}; 6111};
6112 6112
6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits { 6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
6114 u8 opcode[0x10]; 6114 u8 opcode[0x10];
6115 u8 reserved_0[0x10]; 6115 u8 reserved_at_10[0x10];
6116 6116
6117 u8 reserved_1[0x10]; 6117 u8 reserved_at_20[0x10];
6118 u8 op_mod[0x10]; 6118 u8 op_mod[0x10];
6119 6119
6120 u8 reserved_2[0x20]; 6120 u8 reserved_at_40[0x20];
6121 6121
6122 u8 reserved_3[0x10]; 6122 u8 reserved_at_60[0x10];
6123 u8 vxlan_udp_port[0x10]; 6123 u8 vxlan_udp_port[0x10];
6124}; 6124};
6125 6125
6126struct mlx5_ifc_access_register_out_bits { 6126struct mlx5_ifc_access_register_out_bits {
6127 u8 status[0x8]; 6127 u8 status[0x8];
6128 u8 reserved_0[0x18]; 6128 u8 reserved_at_8[0x18];
6129 6129
6130 u8 syndrome[0x20]; 6130 u8 syndrome[0x20];
6131 6131
6132 u8 reserved_1[0x40]; 6132 u8 reserved_at_40[0x40];
6133 6133
6134 u8 register_data[0][0x20]; 6134 u8 register_data[0][0x20];
6135}; 6135};
@@ -6141,12 +6141,12 @@ enum {
6141 6141
6142struct mlx5_ifc_access_register_in_bits { 6142struct mlx5_ifc_access_register_in_bits {
6143 u8 opcode[0x10]; 6143 u8 opcode[0x10];
6144 u8 reserved_0[0x10]; 6144 u8 reserved_at_10[0x10];
6145 6145
6146 u8 reserved_1[0x10]; 6146 u8 reserved_at_20[0x10];
6147 u8 op_mod[0x10]; 6147 u8 op_mod[0x10];
6148 6148
6149 u8 reserved_2[0x10]; 6149 u8 reserved_at_40[0x10];
6150 u8 register_id[0x10]; 6150 u8 register_id[0x10];
6151 6151
6152 u8 argument[0x20]; 6152 u8 argument[0x20];
@@ -6159,24 +6159,24 @@ struct mlx5_ifc_sltp_reg_bits {
6159 u8 version[0x4]; 6159 u8 version[0x4];
6160 u8 local_port[0x8]; 6160 u8 local_port[0x8];
6161 u8 pnat[0x2]; 6161 u8 pnat[0x2];
6162 u8 reserved_0[0x2]; 6162 u8 reserved_at_12[0x2];
6163 u8 lane[0x4]; 6163 u8 lane[0x4];
6164 u8 reserved_1[0x8]; 6164 u8 reserved_at_18[0x8];
6165 6165
6166 u8 reserved_2[0x20]; 6166 u8 reserved_at_20[0x20];
6167 6167
6168 u8 reserved_3[0x7]; 6168 u8 reserved_at_40[0x7];
6169 u8 polarity[0x1]; 6169 u8 polarity[0x1];
6170 u8 ob_tap0[0x8]; 6170 u8 ob_tap0[0x8];
6171 u8 ob_tap1[0x8]; 6171 u8 ob_tap1[0x8];
6172 u8 ob_tap2[0x8]; 6172 u8 ob_tap2[0x8];
6173 6173
6174 u8 reserved_4[0xc]; 6174 u8 reserved_at_60[0xc];
6175 u8 ob_preemp_mode[0x4]; 6175 u8 ob_preemp_mode[0x4];
6176 u8 ob_reg[0x8]; 6176 u8 ob_reg[0x8];
6177 u8 ob_bias[0x8]; 6177 u8 ob_bias[0x8];
6178 6178
6179 u8 reserved_5[0x20]; 6179 u8 reserved_at_80[0x20];
6180}; 6180};
6181 6181
6182struct mlx5_ifc_slrg_reg_bits { 6182struct mlx5_ifc_slrg_reg_bits {
@@ -6184,36 +6184,36 @@ struct mlx5_ifc_slrg_reg_bits {
6184 u8 version[0x4]; 6184 u8 version[0x4];
6185 u8 local_port[0x8]; 6185 u8 local_port[0x8];
6186 u8 pnat[0x2]; 6186 u8 pnat[0x2];
6187 u8 reserved_0[0x2]; 6187 u8 reserved_at_12[0x2];
6188 u8 lane[0x4]; 6188 u8 lane[0x4];
6189 u8 reserved_1[0x8]; 6189 u8 reserved_at_18[0x8];
6190 6190
6191 u8 time_to_link_up[0x10]; 6191 u8 time_to_link_up[0x10];
6192 u8 reserved_2[0xc]; 6192 u8 reserved_at_30[0xc];
6193 u8 grade_lane_speed[0x4]; 6193 u8 grade_lane_speed[0x4];
6194 6194
6195 u8 grade_version[0x8]; 6195 u8 grade_version[0x8];
6196 u8 grade[0x18]; 6196 u8 grade[0x18];
6197 6197
6198 u8 reserved_3[0x4]; 6198 u8 reserved_at_60[0x4];
6199 u8 height_grade_type[0x4]; 6199 u8 height_grade_type[0x4];
6200 u8 height_grade[0x18]; 6200 u8 height_grade[0x18];
6201 6201
6202 u8 height_dz[0x10]; 6202 u8 height_dz[0x10];
6203 u8 height_dv[0x10]; 6203 u8 height_dv[0x10];
6204 6204
6205 u8 reserved_4[0x10]; 6205 u8 reserved_at_a0[0x10];
6206 u8 height_sigma[0x10]; 6206 u8 height_sigma[0x10];
6207 6207
6208 u8 reserved_5[0x20]; 6208 u8 reserved_at_c0[0x20];
6209 6209
6210 u8 reserved_6[0x4]; 6210 u8 reserved_at_e0[0x4];
6211 u8 phase_grade_type[0x4]; 6211 u8 phase_grade_type[0x4];
6212 u8 phase_grade[0x18]; 6212 u8 phase_grade[0x18];
6213 6213
6214 u8 reserved_7[0x8]; 6214 u8 reserved_at_100[0x8];
6215 u8 phase_eo_pos[0x8]; 6215 u8 phase_eo_pos[0x8];
6216 u8 reserved_8[0x8]; 6216 u8 reserved_at_110[0x8];
6217 u8 phase_eo_neg[0x8]; 6217 u8 phase_eo_neg[0x8];
6218 6218
6219 u8 ffe_set_tested[0x10]; 6219 u8 ffe_set_tested[0x10];
@@ -6221,70 +6221,70 @@ struct mlx5_ifc_slrg_reg_bits {
6221}; 6221};
6222 6222
6223struct mlx5_ifc_pvlc_reg_bits { 6223struct mlx5_ifc_pvlc_reg_bits {
6224 u8 reserved_0[0x8]; 6224 u8 reserved_at_0[0x8];
6225 u8 local_port[0x8]; 6225 u8 local_port[0x8];
6226 u8 reserved_1[0x10]; 6226 u8 reserved_at_10[0x10];
6227 6227
6228 u8 reserved_2[0x1c]; 6228 u8 reserved_at_20[0x1c];
6229 u8 vl_hw_cap[0x4]; 6229 u8 vl_hw_cap[0x4];
6230 6230
6231 u8 reserved_3[0x1c]; 6231 u8 reserved_at_40[0x1c];
6232 u8 vl_admin[0x4]; 6232 u8 vl_admin[0x4];
6233 6233
6234 u8 reserved_4[0x1c]; 6234 u8 reserved_at_60[0x1c];
6235 u8 vl_operational[0x4]; 6235 u8 vl_operational[0x4];
6236}; 6236};
6237 6237
6238struct mlx5_ifc_pude_reg_bits { 6238struct mlx5_ifc_pude_reg_bits {
6239 u8 swid[0x8]; 6239 u8 swid[0x8];
6240 u8 local_port[0x8]; 6240 u8 local_port[0x8];
6241 u8 reserved_0[0x4]; 6241 u8 reserved_at_10[0x4];
6242 u8 admin_status[0x4]; 6242 u8 admin_status[0x4];
6243 u8 reserved_1[0x4]; 6243 u8 reserved_at_18[0x4];
6244 u8 oper_status[0x4]; 6244 u8 oper_status[0x4];
6245 6245
6246 u8 reserved_2[0x60]; 6246 u8 reserved_at_20[0x60];
6247}; 6247};
6248 6248
6249struct mlx5_ifc_ptys_reg_bits { 6249struct mlx5_ifc_ptys_reg_bits {
6250 u8 reserved_0[0x8]; 6250 u8 reserved_at_0[0x8];
6251 u8 local_port[0x8]; 6251 u8 local_port[0x8];
6252 u8 reserved_1[0xd]; 6252 u8 reserved_at_10[0xd];
6253 u8 proto_mask[0x3]; 6253 u8 proto_mask[0x3];
6254 6254
6255 u8 reserved_2[0x40]; 6255 u8 reserved_at_20[0x40];
6256 6256
6257 u8 eth_proto_capability[0x20]; 6257 u8 eth_proto_capability[0x20];
6258 6258
6259 u8 ib_link_width_capability[0x10]; 6259 u8 ib_link_width_capability[0x10];
6260 u8 ib_proto_capability[0x10]; 6260 u8 ib_proto_capability[0x10];
6261 6261
6262 u8 reserved_3[0x20]; 6262 u8 reserved_at_a0[0x20];
6263 6263
6264 u8 eth_proto_admin[0x20]; 6264 u8 eth_proto_admin[0x20];
6265 6265
6266 u8 ib_link_width_admin[0x10]; 6266 u8 ib_link_width_admin[0x10];
6267 u8 ib_proto_admin[0x10]; 6267 u8 ib_proto_admin[0x10];
6268 6268
6269 u8 reserved_4[0x20]; 6269 u8 reserved_at_100[0x20];
6270 6270
6271 u8 eth_proto_oper[0x20]; 6271 u8 eth_proto_oper[0x20];
6272 6272
6273 u8 ib_link_width_oper[0x10]; 6273 u8 ib_link_width_oper[0x10];
6274 u8 ib_proto_oper[0x10]; 6274 u8 ib_proto_oper[0x10];
6275 6275
6276 u8 reserved_5[0x20]; 6276 u8 reserved_at_160[0x20];
6277 6277
6278 u8 eth_proto_lp_advertise[0x20]; 6278 u8 eth_proto_lp_advertise[0x20];
6279 6279
6280 u8 reserved_6[0x60]; 6280 u8 reserved_at_1a0[0x60];
6281}; 6281};
6282 6282
6283struct mlx5_ifc_ptas_reg_bits { 6283struct mlx5_ifc_ptas_reg_bits {
6284 u8 reserved_0[0x20]; 6284 u8 reserved_at_0[0x20];
6285 6285
6286 u8 algorithm_options[0x10]; 6286 u8 algorithm_options[0x10];
6287 u8 reserved_1[0x4]; 6287 u8 reserved_at_30[0x4];
6288 u8 repetitions_mode[0x4]; 6288 u8 repetitions_mode[0x4];
6289 u8 num_of_repetitions[0x8]; 6289 u8 num_of_repetitions[0x8];
6290 6290
@@ -6310,13 +6310,13 @@ struct mlx5_ifc_ptas_reg_bits {
6310 u8 ndeo_error_threshold[0x10]; 6310 u8 ndeo_error_threshold[0x10];
6311 6311
6312 u8 mixer_offset_step_size[0x10]; 6312 u8 mixer_offset_step_size[0x10];
6313 u8 reserved_2[0x8]; 6313 u8 reserved_at_110[0x8];
6314 u8 mix90_phase_for_voltage_bath[0x8]; 6314 u8 mix90_phase_for_voltage_bath[0x8];
6315 6315
6316 u8 mixer_offset_start[0x10]; 6316 u8 mixer_offset_start[0x10];
6317 u8 mixer_offset_end[0x10]; 6317 u8 mixer_offset_end[0x10];
6318 6318
6319 u8 reserved_3[0x15]; 6319 u8 reserved_at_140[0x15];
6320 u8 ber_test_time[0xb]; 6320 u8 ber_test_time[0xb];
6321}; 6321};
6322 6322
@@ -6324,154 +6324,154 @@ struct mlx5_ifc_pspa_reg_bits {
6324 u8 swid[0x8]; 6324 u8 swid[0x8];
6325 u8 local_port[0x8]; 6325 u8 local_port[0x8];
6326 u8 sub_port[0x8]; 6326 u8 sub_port[0x8];
6327 u8 reserved_0[0x8]; 6327 u8 reserved_at_18[0x8];
6328 6328
6329 u8 reserved_1[0x20]; 6329 u8 reserved_at_20[0x20];
6330}; 6330};
6331 6331
6332struct mlx5_ifc_pqdr_reg_bits { 6332struct mlx5_ifc_pqdr_reg_bits {
6333 u8 reserved_0[0x8]; 6333 u8 reserved_at_0[0x8];
6334 u8 local_port[0x8]; 6334 u8 local_port[0x8];
6335 u8 reserved_1[0x5]; 6335 u8 reserved_at_10[0x5];
6336 u8 prio[0x3]; 6336 u8 prio[0x3];
6337 u8 reserved_2[0x6]; 6337 u8 reserved_at_18[0x6];
6338 u8 mode[0x2]; 6338 u8 mode[0x2];
6339 6339
6340 u8 reserved_3[0x20]; 6340 u8 reserved_at_20[0x20];
6341 6341
6342 u8 reserved_4[0x10]; 6342 u8 reserved_at_40[0x10];
6343 u8 min_threshold[0x10]; 6343 u8 min_threshold[0x10];
6344 6344
6345 u8 reserved_5[0x10]; 6345 u8 reserved_at_60[0x10];
6346 u8 max_threshold[0x10]; 6346 u8 max_threshold[0x10];
6347 6347
6348 u8 reserved_6[0x10]; 6348 u8 reserved_at_80[0x10];
6349 u8 mark_probability_denominator[0x10]; 6349 u8 mark_probability_denominator[0x10];
6350 6350
6351 u8 reserved_7[0x60]; 6351 u8 reserved_at_a0[0x60];
6352}; 6352};
6353 6353
6354struct mlx5_ifc_ppsc_reg_bits { 6354struct mlx5_ifc_ppsc_reg_bits {
6355 u8 reserved_0[0x8]; 6355 u8 reserved_at_0[0x8];
6356 u8 local_port[0x8]; 6356 u8 local_port[0x8];
6357 u8 reserved_1[0x10]; 6357 u8 reserved_at_10[0x10];
6358 6358
6359 u8 reserved_2[0x60]; 6359 u8 reserved_at_20[0x60];
6360 6360
6361 u8 reserved_3[0x1c]; 6361 u8 reserved_at_80[0x1c];
6362 u8 wrps_admin[0x4]; 6362 u8 wrps_admin[0x4];
6363 6363
6364 u8 reserved_4[0x1c]; 6364 u8 reserved_at_a0[0x1c];
6365 u8 wrps_status[0x4]; 6365 u8 wrps_status[0x4];
6366 6366
6367 u8 reserved_5[0x8]; 6367 u8 reserved_at_c0[0x8];
6368 u8 up_threshold[0x8]; 6368 u8 up_threshold[0x8];
6369 u8 reserved_6[0x8]; 6369 u8 reserved_at_d0[0x8];
6370 u8 down_threshold[0x8]; 6370 u8 down_threshold[0x8];
6371 6371
6372 u8 reserved_7[0x20]; 6372 u8 reserved_at_e0[0x20];
6373 6373
6374 u8 reserved_8[0x1c]; 6374 u8 reserved_at_100[0x1c];
6375 u8 srps_admin[0x4]; 6375 u8 srps_admin[0x4];
6376 6376
6377 u8 reserved_9[0x1c]; 6377 u8 reserved_at_120[0x1c];
6378 u8 srps_status[0x4]; 6378 u8 srps_status[0x4];
6379 6379
6380 u8 reserved_10[0x40]; 6380 u8 reserved_at_140[0x40];
6381}; 6381};
6382 6382
6383struct mlx5_ifc_pplr_reg_bits { 6383struct mlx5_ifc_pplr_reg_bits {
6384 u8 reserved_0[0x8]; 6384 u8 reserved_at_0[0x8];
6385 u8 local_port[0x8]; 6385 u8 local_port[0x8];
6386 u8 reserved_1[0x10]; 6386 u8 reserved_at_10[0x10];
6387 6387
6388 u8 reserved_2[0x8]; 6388 u8 reserved_at_20[0x8];
6389 u8 lb_cap[0x8]; 6389 u8 lb_cap[0x8];
6390 u8 reserved_3[0x8]; 6390 u8 reserved_at_30[0x8];
6391 u8 lb_en[0x8]; 6391 u8 lb_en[0x8];
6392}; 6392};
6393 6393
6394struct mlx5_ifc_pplm_reg_bits { 6394struct mlx5_ifc_pplm_reg_bits {
6395 u8 reserved_0[0x8]; 6395 u8 reserved_at_0[0x8];
6396 u8 local_port[0x8]; 6396 u8 local_port[0x8];
6397 u8 reserved_1[0x10]; 6397 u8 reserved_at_10[0x10];
6398 6398
6399 u8 reserved_2[0x20]; 6399 u8 reserved_at_20[0x20];
6400 6400
6401 u8 port_profile_mode[0x8]; 6401 u8 port_profile_mode[0x8];
6402 u8 static_port_profile[0x8]; 6402 u8 static_port_profile[0x8];
6403 u8 active_port_profile[0x8]; 6403 u8 active_port_profile[0x8];
6404 u8 reserved_3[0x8]; 6404 u8 reserved_at_58[0x8];
6405 6405
6406 u8 retransmission_active[0x8]; 6406 u8 retransmission_active[0x8];
6407 u8 fec_mode_active[0x18]; 6407 u8 fec_mode_active[0x18];
6408 6408
6409 u8 reserved_4[0x20]; 6409 u8 reserved_at_80[0x20];
6410}; 6410};
6411 6411
6412struct mlx5_ifc_ppcnt_reg_bits { 6412struct mlx5_ifc_ppcnt_reg_bits {
6413 u8 swid[0x8]; 6413 u8 swid[0x8];
6414 u8 local_port[0x8]; 6414 u8 local_port[0x8];
6415 u8 pnat[0x2]; 6415 u8 pnat[0x2];
6416 u8 reserved_0[0x8]; 6416 u8 reserved_at_12[0x8];
6417 u8 grp[0x6]; 6417 u8 grp[0x6];
6418 6418
6419 u8 clr[0x1]; 6419 u8 clr[0x1];
6420 u8 reserved_1[0x1c]; 6420 u8 reserved_at_21[0x1c];
6421 u8 prio_tc[0x3]; 6421 u8 prio_tc[0x3];
6422 6422
6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; 6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
6424}; 6424};
6425 6425
6426struct mlx5_ifc_ppad_reg_bits { 6426struct mlx5_ifc_ppad_reg_bits {
6427 u8 reserved_0[0x3]; 6427 u8 reserved_at_0[0x3];
6428 u8 single_mac[0x1]; 6428 u8 single_mac[0x1];
6429 u8 reserved_1[0x4]; 6429 u8 reserved_at_4[0x4];
6430 u8 local_port[0x8]; 6430 u8 local_port[0x8];
6431 u8 mac_47_32[0x10]; 6431 u8 mac_47_32[0x10];
6432 6432
6433 u8 mac_31_0[0x20]; 6433 u8 mac_31_0[0x20];
6434 6434
6435 u8 reserved_2[0x40]; 6435 u8 reserved_at_40[0x40];
6436}; 6436};
6437 6437
6438struct mlx5_ifc_pmtu_reg_bits { 6438struct mlx5_ifc_pmtu_reg_bits {
6439 u8 reserved_0[0x8]; 6439 u8 reserved_at_0[0x8];
6440 u8 local_port[0x8]; 6440 u8 local_port[0x8];
6441 u8 reserved_1[0x10]; 6441 u8 reserved_at_10[0x10];
6442 6442
6443 u8 max_mtu[0x10]; 6443 u8 max_mtu[0x10];
6444 u8 reserved_2[0x10]; 6444 u8 reserved_at_30[0x10];
6445 6445
6446 u8 admin_mtu[0x10]; 6446 u8 admin_mtu[0x10];
6447 u8 reserved_3[0x10]; 6447 u8 reserved_at_50[0x10];
6448 6448
6449 u8 oper_mtu[0x10]; 6449 u8 oper_mtu[0x10];
6450 u8 reserved_4[0x10]; 6450 u8 reserved_at_70[0x10];
6451}; 6451};
6452 6452
6453struct mlx5_ifc_pmpr_reg_bits { 6453struct mlx5_ifc_pmpr_reg_bits {
6454 u8 reserved_0[0x8]; 6454 u8 reserved_at_0[0x8];
6455 u8 module[0x8]; 6455 u8 module[0x8];
6456 u8 reserved_1[0x10]; 6456 u8 reserved_at_10[0x10];
6457 6457
6458 u8 reserved_2[0x18]; 6458 u8 reserved_at_20[0x18];
6459 u8 attenuation_5g[0x8]; 6459 u8 attenuation_5g[0x8];
6460 6460
6461 u8 reserved_3[0x18]; 6461 u8 reserved_at_40[0x18];
6462 u8 attenuation_7g[0x8]; 6462 u8 attenuation_7g[0x8];
6463 6463
6464 u8 reserved_4[0x18]; 6464 u8 reserved_at_60[0x18];
6465 u8 attenuation_12g[0x8]; 6465 u8 attenuation_12g[0x8];
6466}; 6466};
6467 6467
6468struct mlx5_ifc_pmpe_reg_bits { 6468struct mlx5_ifc_pmpe_reg_bits {
6469 u8 reserved_0[0x8]; 6469 u8 reserved_at_0[0x8];
6470 u8 module[0x8]; 6470 u8 module[0x8];
6471 u8 reserved_1[0xc]; 6471 u8 reserved_at_10[0xc];
6472 u8 module_status[0x4]; 6472 u8 module_status[0x4];
6473 6473
6474 u8 reserved_2[0x60]; 6474 u8 reserved_at_20[0x60];
6475}; 6475};
6476 6476
6477struct mlx5_ifc_pmpc_reg_bits { 6477struct mlx5_ifc_pmpc_reg_bits {
@@ -6479,20 +6479,20 @@ struct mlx5_ifc_pmpc_reg_bits {
6479}; 6479};
6480 6480
6481struct mlx5_ifc_pmlpn_reg_bits { 6481struct mlx5_ifc_pmlpn_reg_bits {
6482 u8 reserved_0[0x4]; 6482 u8 reserved_at_0[0x4];
6483 u8 mlpn_status[0x4]; 6483 u8 mlpn_status[0x4];
6484 u8 local_port[0x8]; 6484 u8 local_port[0x8];
6485 u8 reserved_1[0x10]; 6485 u8 reserved_at_10[0x10];
6486 6486
6487 u8 e[0x1]; 6487 u8 e[0x1];
6488 u8 reserved_2[0x1f]; 6488 u8 reserved_at_21[0x1f];
6489}; 6489};
6490 6490
6491struct mlx5_ifc_pmlp_reg_bits { 6491struct mlx5_ifc_pmlp_reg_bits {
6492 u8 rxtx[0x1]; 6492 u8 rxtx[0x1];
6493 u8 reserved_0[0x7]; 6493 u8 reserved_at_1[0x7];
6494 u8 local_port[0x8]; 6494 u8 local_port[0x8];
6495 u8 reserved_1[0x8]; 6495 u8 reserved_at_10[0x8];
6496 u8 width[0x8]; 6496 u8 width[0x8];
6497 6497
6498 u8 lane0_module_mapping[0x20]; 6498 u8 lane0_module_mapping[0x20];
@@ -6503,36 +6503,36 @@ struct mlx5_ifc_pmlp_reg_bits {
6503 6503
6504 u8 lane3_module_mapping[0x20]; 6504 u8 lane3_module_mapping[0x20];
6505 6505
6506 u8 reserved_2[0x160]; 6506 u8 reserved_at_a0[0x160];
6507}; 6507};
6508 6508
6509struct mlx5_ifc_pmaos_reg_bits { 6509struct mlx5_ifc_pmaos_reg_bits {
6510 u8 reserved_0[0x8]; 6510 u8 reserved_at_0[0x8];
6511 u8 module[0x8]; 6511 u8 module[0x8];
6512 u8 reserved_1[0x4]; 6512 u8 reserved_at_10[0x4];
6513 u8 admin_status[0x4]; 6513 u8 admin_status[0x4];
6514 u8 reserved_2[0x4]; 6514 u8 reserved_at_18[0x4];
6515 u8 oper_status[0x4]; 6515 u8 oper_status[0x4];
6516 6516
6517 u8 ase[0x1]; 6517 u8 ase[0x1];
6518 u8 ee[0x1]; 6518 u8 ee[0x1];
6519 u8 reserved_3[0x1c]; 6519 u8 reserved_at_22[0x1c];
6520 u8 e[0x2]; 6520 u8 e[0x2];
6521 6521
6522 u8 reserved_4[0x40]; 6522 u8 reserved_at_40[0x40];
6523}; 6523};
6524 6524
6525struct mlx5_ifc_plpc_reg_bits { 6525struct mlx5_ifc_plpc_reg_bits {
6526 u8 reserved_0[0x4]; 6526 u8 reserved_at_0[0x4];
6527 u8 profile_id[0xc]; 6527 u8 profile_id[0xc];
6528 u8 reserved_1[0x4]; 6528 u8 reserved_at_10[0x4];
6529 u8 proto_mask[0x4]; 6529 u8 proto_mask[0x4];
6530 u8 reserved_2[0x8]; 6530 u8 reserved_at_18[0x8];
6531 6531
6532 u8 reserved_3[0x10]; 6532 u8 reserved_at_20[0x10];
6533 u8 lane_speed[0x10]; 6533 u8 lane_speed[0x10];
6534 6534
6535 u8 reserved_4[0x17]; 6535 u8 reserved_at_40[0x17];
6536 u8 lpbf[0x1]; 6536 u8 lpbf[0x1];
6537 u8 fec_mode_policy[0x8]; 6537 u8 fec_mode_policy[0x8];
6538 6538
@@ -6545,44 +6545,44 @@ struct mlx5_ifc_plpc_reg_bits {
6545 u8 retransmission_request_admin[0x8]; 6545 u8 retransmission_request_admin[0x8];
6546 u8 fec_mode_request_admin[0x18]; 6546 u8 fec_mode_request_admin[0x18];
6547 6547
6548 u8 reserved_5[0x80]; 6548 u8 reserved_at_c0[0x80];
6549}; 6549};
6550 6550
6551struct mlx5_ifc_plib_reg_bits { 6551struct mlx5_ifc_plib_reg_bits {
6552 u8 reserved_0[0x8]; 6552 u8 reserved_at_0[0x8];
6553 u8 local_port[0x8]; 6553 u8 local_port[0x8];
6554 u8 reserved_1[0x8]; 6554 u8 reserved_at_10[0x8];
6555 u8 ib_port[0x8]; 6555 u8 ib_port[0x8];
6556 6556
6557 u8 reserved_2[0x60]; 6557 u8 reserved_at_20[0x60];
6558}; 6558};
6559 6559
6560struct mlx5_ifc_plbf_reg_bits { 6560struct mlx5_ifc_plbf_reg_bits {
6561 u8 reserved_0[0x8]; 6561 u8 reserved_at_0[0x8];
6562 u8 local_port[0x8]; 6562 u8 local_port[0x8];
6563 u8 reserved_1[0xd]; 6563 u8 reserved_at_10[0xd];
6564 u8 lbf_mode[0x3]; 6564 u8 lbf_mode[0x3];
6565 6565
6566 u8 reserved_2[0x20]; 6566 u8 reserved_at_20[0x20];
6567}; 6567};
6568 6568
6569struct mlx5_ifc_pipg_reg_bits { 6569struct mlx5_ifc_pipg_reg_bits {
6570 u8 reserved_0[0x8]; 6570 u8 reserved_at_0[0x8];
6571 u8 local_port[0x8]; 6571 u8 local_port[0x8];
6572 u8 reserved_1[0x10]; 6572 u8 reserved_at_10[0x10];
6573 6573
6574 u8 dic[0x1]; 6574 u8 dic[0x1];
6575 u8 reserved_2[0x19]; 6575 u8 reserved_at_21[0x19];
6576 u8 ipg[0x4]; 6576 u8 ipg[0x4];
6577 u8 reserved_3[0x2]; 6577 u8 reserved_at_3e[0x2];
6578}; 6578};
6579 6579
6580struct mlx5_ifc_pifr_reg_bits { 6580struct mlx5_ifc_pifr_reg_bits {
6581 u8 reserved_0[0x8]; 6581 u8 reserved_at_0[0x8];
6582 u8 local_port[0x8]; 6582 u8 local_port[0x8];
6583 u8 reserved_1[0x10]; 6583 u8 reserved_at_10[0x10];
6584 6584
6585 u8 reserved_2[0xe0]; 6585 u8 reserved_at_20[0xe0];
6586 6586
6587 u8 port_filter[8][0x20]; 6587 u8 port_filter[8][0x20];
6588 6588
@@ -6590,36 +6590,36 @@ struct mlx5_ifc_pifr_reg_bits {
6590}; 6590};
6591 6591
6592struct mlx5_ifc_pfcc_reg_bits { 6592struct mlx5_ifc_pfcc_reg_bits {
6593 u8 reserved_0[0x8]; 6593 u8 reserved_at_0[0x8];
6594 u8 local_port[0x8]; 6594 u8 local_port[0x8];
6595 u8 reserved_1[0x10]; 6595 u8 reserved_at_10[0x10];
6596 6596
6597 u8 ppan[0x4]; 6597 u8 ppan[0x4];
6598 u8 reserved_2[0x4]; 6598 u8 reserved_at_24[0x4];
6599 u8 prio_mask_tx[0x8]; 6599 u8 prio_mask_tx[0x8];
6600 u8 reserved_3[0x8]; 6600 u8 reserved_at_30[0x8];
6601 u8 prio_mask_rx[0x8]; 6601 u8 prio_mask_rx[0x8];
6602 6602
6603 u8 pptx[0x1]; 6603 u8 pptx[0x1];
6604 u8 aptx[0x1]; 6604 u8 aptx[0x1];
6605 u8 reserved_4[0x6]; 6605 u8 reserved_at_42[0x6];
6606 u8 pfctx[0x8]; 6606 u8 pfctx[0x8];
6607 u8 reserved_5[0x10]; 6607 u8 reserved_at_50[0x10];
6608 6608
6609 u8 pprx[0x1]; 6609 u8 pprx[0x1];
6610 u8 aprx[0x1]; 6610 u8 aprx[0x1];
6611 u8 reserved_6[0x6]; 6611 u8 reserved_at_62[0x6];
6612 u8 pfcrx[0x8]; 6612 u8 pfcrx[0x8];
6613 u8 reserved_7[0x10]; 6613 u8 reserved_at_70[0x10];
6614 6614
6615 u8 reserved_8[0x80]; 6615 u8 reserved_at_80[0x80];
6616}; 6616};
6617 6617
6618struct mlx5_ifc_pelc_reg_bits { 6618struct mlx5_ifc_pelc_reg_bits {
6619 u8 op[0x4]; 6619 u8 op[0x4];
6620 u8 reserved_0[0x4]; 6620 u8 reserved_at_4[0x4];
6621 u8 local_port[0x8]; 6621 u8 local_port[0x8];
6622 u8 reserved_1[0x10]; 6622 u8 reserved_at_10[0x10];
6623 6623
6624 u8 op_admin[0x8]; 6624 u8 op_admin[0x8];
6625 u8 op_capability[0x8]; 6625 u8 op_capability[0x8];
@@ -6634,28 +6634,28 @@ struct mlx5_ifc_pelc_reg_bits {
6634 6634
6635 u8 active[0x40]; 6635 u8 active[0x40];
6636 6636
6637 u8 reserved_2[0x80]; 6637 u8 reserved_at_140[0x80];
6638}; 6638};
6639 6639
6640struct mlx5_ifc_peir_reg_bits { 6640struct mlx5_ifc_peir_reg_bits {
6641 u8 reserved_0[0x8]; 6641 u8 reserved_at_0[0x8];
6642 u8 local_port[0x8]; 6642 u8 local_port[0x8];
6643 u8 reserved_1[0x10]; 6643 u8 reserved_at_10[0x10];
6644 6644
6645 u8 reserved_2[0xc]; 6645 u8 reserved_at_20[0xc];
6646 u8 error_count[0x4]; 6646 u8 error_count[0x4];
6647 u8 reserved_3[0x10]; 6647 u8 reserved_at_30[0x10];
6648 6648
6649 u8 reserved_4[0xc]; 6649 u8 reserved_at_40[0xc];
6650 u8 lane[0x4]; 6650 u8 lane[0x4];
6651 u8 reserved_5[0x8]; 6651 u8 reserved_at_50[0x8];
6652 u8 error_type[0x8]; 6652 u8 error_type[0x8];
6653}; 6653};
6654 6654
6655struct mlx5_ifc_pcap_reg_bits { 6655struct mlx5_ifc_pcap_reg_bits {
6656 u8 reserved_0[0x8]; 6656 u8 reserved_at_0[0x8];
6657 u8 local_port[0x8]; 6657 u8 local_port[0x8];
6658 u8 reserved_1[0x10]; 6658 u8 reserved_at_10[0x10];
6659 6659
6660 u8 port_capability_mask[4][0x20]; 6660 u8 port_capability_mask[4][0x20];
6661}; 6661};
@@ -6663,46 +6663,46 @@ struct mlx5_ifc_pcap_reg_bits {
6663struct mlx5_ifc_paos_reg_bits { 6663struct mlx5_ifc_paos_reg_bits {
6664 u8 swid[0x8]; 6664 u8 swid[0x8];
6665 u8 local_port[0x8]; 6665 u8 local_port[0x8];
6666 u8 reserved_0[0x4]; 6666 u8 reserved_at_10[0x4];
6667 u8 admin_status[0x4]; 6667 u8 admin_status[0x4];
6668 u8 reserved_1[0x4]; 6668 u8 reserved_at_18[0x4];
6669 u8 oper_status[0x4]; 6669 u8 oper_status[0x4];
6670 6670
6671 u8 ase[0x1]; 6671 u8 ase[0x1];
6672 u8 ee[0x1]; 6672 u8 ee[0x1];
6673 u8 reserved_2[0x1c]; 6673 u8 reserved_at_22[0x1c];
6674 u8 e[0x2]; 6674 u8 e[0x2];
6675 6675
6676 u8 reserved_3[0x40]; 6676 u8 reserved_at_40[0x40];
6677}; 6677};
6678 6678
6679struct mlx5_ifc_pamp_reg_bits { 6679struct mlx5_ifc_pamp_reg_bits {
6680 u8 reserved_0[0x8]; 6680 u8 reserved_at_0[0x8];
6681 u8 opamp_group[0x8]; 6681 u8 opamp_group[0x8];
6682 u8 reserved_1[0xc]; 6682 u8 reserved_at_10[0xc];
6683 u8 opamp_group_type[0x4]; 6683 u8 opamp_group_type[0x4];
6684 6684
6685 u8 start_index[0x10]; 6685 u8 start_index[0x10];
6686 u8 reserved_2[0x4]; 6686 u8 reserved_at_30[0x4];
6687 u8 num_of_indices[0xc]; 6687 u8 num_of_indices[0xc];
6688 6688
6689 u8 index_data[18][0x10]; 6689 u8 index_data[18][0x10];
6690}; 6690};
6691 6691
6692struct mlx5_ifc_lane_2_module_mapping_bits { 6692struct mlx5_ifc_lane_2_module_mapping_bits {
6693 u8 reserved_0[0x6]; 6693 u8 reserved_at_0[0x6];
6694 u8 rx_lane[0x2]; 6694 u8 rx_lane[0x2];
6695 u8 reserved_1[0x6]; 6695 u8 reserved_at_8[0x6];
6696 u8 tx_lane[0x2]; 6696 u8 tx_lane[0x2];
6697 u8 reserved_2[0x8]; 6697 u8 reserved_at_10[0x8];
6698 u8 module[0x8]; 6698 u8 module[0x8];
6699}; 6699};
6700 6700
6701struct mlx5_ifc_bufferx_reg_bits { 6701struct mlx5_ifc_bufferx_reg_bits {
6702 u8 reserved_0[0x6]; 6702 u8 reserved_at_0[0x6];
6703 u8 lossy[0x1]; 6703 u8 lossy[0x1];
6704 u8 epsb[0x1]; 6704 u8 epsb[0x1];
6705 u8 reserved_1[0xc]; 6705 u8 reserved_at_8[0xc];
6706 u8 size[0xc]; 6706 u8 size[0xc];
6707 6707
6708 u8 xoff_threshold[0x10]; 6708 u8 xoff_threshold[0x10];
@@ -6714,21 +6714,21 @@ struct mlx5_ifc_set_node_in_bits {
6714}; 6714};
6715 6715
6716struct mlx5_ifc_register_power_settings_bits { 6716struct mlx5_ifc_register_power_settings_bits {
6717 u8 reserved_0[0x18]; 6717 u8 reserved_at_0[0x18];
6718 u8 power_settings_level[0x8]; 6718 u8 power_settings_level[0x8];
6719 6719
6720 u8 reserved_1[0x60]; 6720 u8 reserved_at_20[0x60];
6721}; 6721};
6722 6722
6723struct mlx5_ifc_register_host_endianness_bits { 6723struct mlx5_ifc_register_host_endianness_bits {
6724 u8 he[0x1]; 6724 u8 he[0x1];
6725 u8 reserved_0[0x1f]; 6725 u8 reserved_at_1[0x1f];
6726 6726
6727 u8 reserved_1[0x60]; 6727 u8 reserved_at_20[0x60];
6728}; 6728};
6729 6729
6730struct mlx5_ifc_umr_pointer_desc_argument_bits { 6730struct mlx5_ifc_umr_pointer_desc_argument_bits {
6731 u8 reserved_0[0x20]; 6731 u8 reserved_at_0[0x20];
6732 6732
6733 u8 mkey[0x20]; 6733 u8 mkey[0x20];
6734 6734
@@ -6741,7 +6741,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6741 u8 dc_key[0x40]; 6741 u8 dc_key[0x40];
6742 6742
6743 u8 ext[0x1]; 6743 u8 ext[0x1];
6744 u8 reserved_0[0x7]; 6744 u8 reserved_at_41[0x7];
6745 u8 destination_qp_dct[0x18]; 6745 u8 destination_qp_dct[0x18];
6746 6746
6747 u8 static_rate[0x4]; 6747 u8 static_rate[0x4];
@@ -6750,7 +6750,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6750 u8 mlid[0x7]; 6750 u8 mlid[0x7];
6751 u8 rlid_udp_sport[0x10]; 6751 u8 rlid_udp_sport[0x10];
6752 6752
6753 u8 reserved_1[0x20]; 6753 u8 reserved_at_80[0x20];
6754 6754
6755 u8 rmac_47_16[0x20]; 6755 u8 rmac_47_16[0x20];
6756 6756
@@ -6758,9 +6758,9 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6758 u8 tclass[0x8]; 6758 u8 tclass[0x8];
6759 u8 hop_limit[0x8]; 6759 u8 hop_limit[0x8];
6760 6760
6761 u8 reserved_2[0x1]; 6761 u8 reserved_at_e0[0x1];
6762 u8 grh[0x1]; 6762 u8 grh[0x1];
6763 u8 reserved_3[0x2]; 6763 u8 reserved_at_e2[0x2];
6764 u8 src_addr_index[0x8]; 6764 u8 src_addr_index[0x8];
6765 u8 flow_label[0x14]; 6765 u8 flow_label[0x14];
6766 6766
@@ -6768,27 +6768,27 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6768}; 6768};
6769 6769
6770struct mlx5_ifc_pages_req_event_bits { 6770struct mlx5_ifc_pages_req_event_bits {
6771 u8 reserved_0[0x10]; 6771 u8 reserved_at_0[0x10];
6772 u8 function_id[0x10]; 6772 u8 function_id[0x10];
6773 6773
6774 u8 num_pages[0x20]; 6774 u8 num_pages[0x20];
6775 6775
6776 u8 reserved_1[0xa0]; 6776 u8 reserved_at_40[0xa0];
6777}; 6777};
6778 6778
6779struct mlx5_ifc_eqe_bits { 6779struct mlx5_ifc_eqe_bits {
6780 u8 reserved_0[0x8]; 6780 u8 reserved_at_0[0x8];
6781 u8 event_type[0x8]; 6781 u8 event_type[0x8];
6782 u8 reserved_1[0x8]; 6782 u8 reserved_at_10[0x8];
6783 u8 event_sub_type[0x8]; 6783 u8 event_sub_type[0x8];
6784 6784
6785 u8 reserved_2[0xe0]; 6785 u8 reserved_at_20[0xe0];
6786 6786
6787 union mlx5_ifc_event_auto_bits event_data; 6787 union mlx5_ifc_event_auto_bits event_data;
6788 6788
6789 u8 reserved_3[0x10]; 6789 u8 reserved_at_1e0[0x10];
6790 u8 signature[0x8]; 6790 u8 signature[0x8];
6791 u8 reserved_4[0x7]; 6791 u8 reserved_at_1f8[0x7];
6792 u8 owner[0x1]; 6792 u8 owner[0x1];
6793}; 6793};
6794 6794
@@ -6798,14 +6798,14 @@ enum {
6798 6798
6799struct mlx5_ifc_cmd_queue_entry_bits { 6799struct mlx5_ifc_cmd_queue_entry_bits {
6800 u8 type[0x8]; 6800 u8 type[0x8];
6801 u8 reserved_0[0x18]; 6801 u8 reserved_at_8[0x18];
6802 6802
6803 u8 input_length[0x20]; 6803 u8 input_length[0x20];
6804 6804
6805 u8 input_mailbox_pointer_63_32[0x20]; 6805 u8 input_mailbox_pointer_63_32[0x20];
6806 6806
6807 u8 input_mailbox_pointer_31_9[0x17]; 6807 u8 input_mailbox_pointer_31_9[0x17];
6808 u8 reserved_1[0x9]; 6808 u8 reserved_at_77[0x9];
6809 6809
6810 u8 command_input_inline_data[16][0x8]; 6810 u8 command_input_inline_data[16][0x8];
6811 6811
@@ -6814,20 +6814,20 @@ struct mlx5_ifc_cmd_queue_entry_bits {
6814 u8 output_mailbox_pointer_63_32[0x20]; 6814 u8 output_mailbox_pointer_63_32[0x20];
6815 6815
6816 u8 output_mailbox_pointer_31_9[0x17]; 6816 u8 output_mailbox_pointer_31_9[0x17];
6817 u8 reserved_2[0x9]; 6817 u8 reserved_at_1b7[0x9];
6818 6818
6819 u8 output_length[0x20]; 6819 u8 output_length[0x20];
6820 6820
6821 u8 token[0x8]; 6821 u8 token[0x8];
6822 u8 signature[0x8]; 6822 u8 signature[0x8];
6823 u8 reserved_3[0x8]; 6823 u8 reserved_at_1f0[0x8];
6824 u8 status[0x7]; 6824 u8 status[0x7];
6825 u8 ownership[0x1]; 6825 u8 ownership[0x1];
6826}; 6826};
6827 6827
6828struct mlx5_ifc_cmd_out_bits { 6828struct mlx5_ifc_cmd_out_bits {
6829 u8 status[0x8]; 6829 u8 status[0x8];
6830 u8 reserved_0[0x18]; 6830 u8 reserved_at_8[0x18];
6831 6831
6832 u8 syndrome[0x20]; 6832 u8 syndrome[0x20];
6833 6833
@@ -6836,9 +6836,9 @@ struct mlx5_ifc_cmd_out_bits {
6836 6836
6837struct mlx5_ifc_cmd_in_bits { 6837struct mlx5_ifc_cmd_in_bits {
6838 u8 opcode[0x10]; 6838 u8 opcode[0x10];
6839 u8 reserved_0[0x10]; 6839 u8 reserved_at_10[0x10];
6840 6840
6841 u8 reserved_1[0x10]; 6841 u8 reserved_at_20[0x10];
6842 u8 op_mod[0x10]; 6842 u8 op_mod[0x10];
6843 6843
6844 u8 command[0][0x20]; 6844 u8 command[0][0x20];
@@ -6847,16 +6847,16 @@ struct mlx5_ifc_cmd_in_bits {
6847struct mlx5_ifc_cmd_if_box_bits { 6847struct mlx5_ifc_cmd_if_box_bits {
6848 u8 mailbox_data[512][0x8]; 6848 u8 mailbox_data[512][0x8];
6849 6849
6850 u8 reserved_0[0x180]; 6850 u8 reserved_at_1000[0x180];
6851 6851
6852 u8 next_pointer_63_32[0x20]; 6852 u8 next_pointer_63_32[0x20];
6853 6853
6854 u8 next_pointer_31_10[0x16]; 6854 u8 next_pointer_31_10[0x16];
6855 u8 reserved_1[0xa]; 6855 u8 reserved_at_11b6[0xa];
6856 6856
6857 u8 block_number[0x20]; 6857 u8 block_number[0x20];
6858 6858
6859 u8 reserved_2[0x8]; 6859 u8 reserved_at_11e0[0x8];
6860 u8 token[0x8]; 6860 u8 token[0x8];
6861 u8 ctrl_signature[0x8]; 6861 u8 ctrl_signature[0x8];
6862 u8 signature[0x8]; 6862 u8 signature[0x8];
@@ -6866,7 +6866,7 @@ struct mlx5_ifc_mtt_bits {
6866 u8 ptag_63_32[0x20]; 6866 u8 ptag_63_32[0x20];
6867 6867
6868 u8 ptag_31_8[0x18]; 6868 u8 ptag_31_8[0x18];
6869 u8 reserved_0[0x6]; 6869 u8 reserved_at_38[0x6];
6870 u8 wr_en[0x1]; 6870 u8 wr_en[0x1];
6871 u8 rd_en[0x1]; 6871 u8 rd_en[0x1];
6872}; 6872};
@@ -6904,38 +6904,38 @@ struct mlx5_ifc_initial_seg_bits {
6904 u8 cmd_interface_rev[0x10]; 6904 u8 cmd_interface_rev[0x10];
6905 u8 fw_rev_subminor[0x10]; 6905 u8 fw_rev_subminor[0x10];
6906 6906
6907 u8 reserved_0[0x40]; 6907 u8 reserved_at_40[0x40];
6908 6908
6909 u8 cmdq_phy_addr_63_32[0x20]; 6909 u8 cmdq_phy_addr_63_32[0x20];
6910 6910
6911 u8 cmdq_phy_addr_31_12[0x14]; 6911 u8 cmdq_phy_addr_31_12[0x14];
6912 u8 reserved_1[0x2]; 6912 u8 reserved_at_b4[0x2];
6913 u8 nic_interface[0x2]; 6913 u8 nic_interface[0x2];
6914 u8 log_cmdq_size[0x4]; 6914 u8 log_cmdq_size[0x4];
6915 u8 log_cmdq_stride[0x4]; 6915 u8 log_cmdq_stride[0x4];
6916 6916
6917 u8 command_doorbell_vector[0x20]; 6917 u8 command_doorbell_vector[0x20];
6918 6918
6919 u8 reserved_2[0xf00]; 6919 u8 reserved_at_e0[0xf00];
6920 6920
6921 u8 initializing[0x1]; 6921 u8 initializing[0x1];
6922 u8 reserved_3[0x4]; 6922 u8 reserved_at_fe1[0x4];
6923 u8 nic_interface_supported[0x3]; 6923 u8 nic_interface_supported[0x3];
6924 u8 reserved_4[0x18]; 6924 u8 reserved_at_fe8[0x18];
6925 6925
6926 struct mlx5_ifc_health_buffer_bits health_buffer; 6926 struct mlx5_ifc_health_buffer_bits health_buffer;
6927 6927
6928 u8 no_dram_nic_offset[0x20]; 6928 u8 no_dram_nic_offset[0x20];
6929 6929
6930 u8 reserved_5[0x6e40]; 6930 u8 reserved_at_1220[0x6e40];
6931 6931
6932 u8 reserved_6[0x1f]; 6932 u8 reserved_at_8060[0x1f];
6933 u8 clear_int[0x1]; 6933 u8 clear_int[0x1];
6934 6934
6935 u8 health_syndrome[0x8]; 6935 u8 health_syndrome[0x8];
6936 u8 health_counter[0x18]; 6936 u8 health_counter[0x18];
6937 6937
6938 u8 reserved_7[0x17fc0]; 6938 u8 reserved_at_80a0[0x17fc0];
6939}; 6939};
6940 6940
6941union mlx5_ifc_ports_control_registers_document_bits { 6941union mlx5_ifc_ports_control_registers_document_bits {
@@ -6980,44 +6980,44 @@ union mlx5_ifc_ports_control_registers_document_bits {
6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg; 6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
6981 struct mlx5_ifc_slrg_reg_bits slrg_reg; 6981 struct mlx5_ifc_slrg_reg_bits slrg_reg;
6982 struct mlx5_ifc_sltp_reg_bits sltp_reg; 6982 struct mlx5_ifc_sltp_reg_bits sltp_reg;
6983 u8 reserved_0[0x60e0]; 6983 u8 reserved_at_0[0x60e0];
6984}; 6984};
6985 6985
6986union mlx5_ifc_debug_enhancements_document_bits { 6986union mlx5_ifc_debug_enhancements_document_bits {
6987 struct mlx5_ifc_health_buffer_bits health_buffer; 6987 struct mlx5_ifc_health_buffer_bits health_buffer;
6988 u8 reserved_0[0x200]; 6988 u8 reserved_at_0[0x200];
6989}; 6989};
6990 6990
6991union mlx5_ifc_uplink_pci_interface_document_bits { 6991union mlx5_ifc_uplink_pci_interface_document_bits {
6992 struct mlx5_ifc_initial_seg_bits initial_seg; 6992 struct mlx5_ifc_initial_seg_bits initial_seg;
6993 u8 reserved_0[0x20060]; 6993 u8 reserved_at_0[0x20060];
6994}; 6994};
6995 6995
6996struct mlx5_ifc_set_flow_table_root_out_bits { 6996struct mlx5_ifc_set_flow_table_root_out_bits {
6997 u8 status[0x8]; 6997 u8 status[0x8];
6998 u8 reserved_0[0x18]; 6998 u8 reserved_at_8[0x18];
6999 6999
7000 u8 syndrome[0x20]; 7000 u8 syndrome[0x20];
7001 7001
7002 u8 reserved_1[0x40]; 7002 u8 reserved_at_40[0x40];
7003}; 7003};
7004 7004
7005struct mlx5_ifc_set_flow_table_root_in_bits { 7005struct mlx5_ifc_set_flow_table_root_in_bits {
7006 u8 opcode[0x10]; 7006 u8 opcode[0x10];
7007 u8 reserved_0[0x10]; 7007 u8 reserved_at_10[0x10];
7008 7008
7009 u8 reserved_1[0x10]; 7009 u8 reserved_at_20[0x10];
7010 u8 op_mod[0x10]; 7010 u8 op_mod[0x10];
7011 7011
7012 u8 reserved_2[0x40]; 7012 u8 reserved_at_40[0x40];
7013 7013
7014 u8 table_type[0x8]; 7014 u8 table_type[0x8];
7015 u8 reserved_3[0x18]; 7015 u8 reserved_at_88[0x18];
7016 7016
7017 u8 reserved_4[0x8]; 7017 u8 reserved_at_a0[0x8];
7018 u8 table_id[0x18]; 7018 u8 table_id[0x18];
7019 7019
7020 u8 reserved_5[0x140]; 7020 u8 reserved_at_c0[0x140];
7021}; 7021};
7022 7022
7023enum { 7023enum {
@@ -7026,39 +7026,39 @@ enum {
7026 7026
7027struct mlx5_ifc_modify_flow_table_out_bits { 7027struct mlx5_ifc_modify_flow_table_out_bits {
7028 u8 status[0x8]; 7028 u8 status[0x8];
7029 u8 reserved_0[0x18]; 7029 u8 reserved_at_8[0x18];
7030 7030
7031 u8 syndrome[0x20]; 7031 u8 syndrome[0x20];
7032 7032
7033 u8 reserved_1[0x40]; 7033 u8 reserved_at_40[0x40];
7034}; 7034};
7035 7035
7036struct mlx5_ifc_modify_flow_table_in_bits { 7036struct mlx5_ifc_modify_flow_table_in_bits {
7037 u8 opcode[0x10]; 7037 u8 opcode[0x10];
7038 u8 reserved_0[0x10]; 7038 u8 reserved_at_10[0x10];
7039 7039
7040 u8 reserved_1[0x10]; 7040 u8 reserved_at_20[0x10];
7041 u8 op_mod[0x10]; 7041 u8 op_mod[0x10];
7042 7042
7043 u8 reserved_2[0x20]; 7043 u8 reserved_at_40[0x20];
7044 7044
7045 u8 reserved_3[0x10]; 7045 u8 reserved_at_60[0x10];
7046 u8 modify_field_select[0x10]; 7046 u8 modify_field_select[0x10];
7047 7047
7048 u8 table_type[0x8]; 7048 u8 table_type[0x8];
7049 u8 reserved_4[0x18]; 7049 u8 reserved_at_88[0x18];
7050 7050
7051 u8 reserved_5[0x8]; 7051 u8 reserved_at_a0[0x8];
7052 u8 table_id[0x18]; 7052 u8 table_id[0x18];
7053 7053
7054 u8 reserved_6[0x4]; 7054 u8 reserved_at_c0[0x4];
7055 u8 table_miss_mode[0x4]; 7055 u8 table_miss_mode[0x4];
7056 u8 reserved_7[0x18]; 7056 u8 reserved_at_c8[0x18];
7057 7057
7058 u8 reserved_8[0x8]; 7058 u8 reserved_at_e0[0x8];
7059 u8 table_miss_id[0x18]; 7059 u8 table_miss_id[0x18];
7060 7060
7061 u8 reserved_9[0x100]; 7061 u8 reserved_at_100[0x100];
7062}; 7062};
7063 7063
7064#endif /* MLX5_IFC_H */ 7064#endif /* MLX5_IFC_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 289c2314d766..5440b7b705eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3718,7 +3718,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3718void *netdev_lower_get_next(struct net_device *dev, 3718void *netdev_lower_get_next(struct net_device *dev,
3719 struct list_head **iter); 3719 struct list_head **iter);
3720#define netdev_for_each_lower_dev(dev, ldev, iter) \ 3720#define netdev_for_each_lower_dev(dev, ldev, iter) \
3721 for (iter = &(dev)->adj_list.lower, \ 3721 for (iter = (dev)->adj_list.lower.next, \
3722 ldev = netdev_lower_get_next(dev, &(iter)); \ 3722 ldev = netdev_lower_get_next(dev, &(iter)); \
3723 ldev; \ 3723 ldev; \
3724 ldev = netdev_lower_get_next(dev, &(iter))) 3724 ldev = netdev_lower_get_next(dev, &(iter)))
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 48e0320cd643..67300f8e5f2f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -550,9 +550,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
550 550
551static inline loff_t nfs_size_to_loff_t(__u64 size) 551static inline loff_t nfs_size_to_loff_t(__u64 size)
552{ 552{
553 if (size > (__u64) OFFSET_MAX - 1) 553 return min_t(u64, size, OFFSET_MAX);
554 return OFFSET_MAX - 1;
555 return (loff_t) size;
556} 554}
557 555
558static inline ino_t 556static inline ino_t
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 791098a08a87..d320906cf13e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -275,6 +275,7 @@ struct nfs4_layoutcommit_args {
275 size_t layoutupdate_len; 275 size_t layoutupdate_len;
276 struct page *layoutupdate_page; 276 struct page *layoutupdate_page;
277 struct page **layoutupdate_pages; 277 struct page **layoutupdate_pages;
278 __be32 *start_p;
278}; 279};
279 280
280struct nfs4_layoutcommit_res { 281struct nfs4_layoutcommit_res {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27df4a6585da..27716254dcc5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -988,23 +988,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
988 return pdev->is_managed; 988 return pdev->is_managed;
989} 989}
990 990
991static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
992{
993 pdev->irq = irq;
994 pdev->irq_managed = 1;
995}
996
997static inline void pci_reset_managed_irq(struct pci_dev *pdev)
998{
999 pdev->irq = 0;
1000 pdev->irq_managed = 0;
1001}
1002
1003static inline bool pci_has_managed_irq(struct pci_dev *pdev)
1004{
1005 return pdev->irq_managed && pdev->irq > 0;
1006}
1007
1008void pci_disable_device(struct pci_dev *dev); 991void pci_disable_device(struct pci_dev *dev);
1009 992
1010extern unsigned int pcibios_max_latency; 993extern unsigned int pcibios_max_latency;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b35a61a481fa..f5c5a3fa2c81 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -397,6 +397,7 @@ struct pmu {
397 * enum perf_event_active_state - the states of a event 397 * enum perf_event_active_state - the states of a event
398 */ 398 */
399enum perf_event_active_state { 399enum perf_event_active_state {
400 PERF_EVENT_STATE_DEAD = -4,
400 PERF_EVENT_STATE_EXIT = -3, 401 PERF_EVENT_STATE_EXIT = -3,
401 PERF_EVENT_STATE_ERROR = -2, 402 PERF_EVENT_STATE_ERROR = -2,
402 PERF_EVENT_STATE_OFF = -1, 403 PERF_EVENT_STATE_OFF = -1,
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
905 } 906 }
906} 907}
907 908
908extern struct static_key_deferred perf_sched_events; 909extern struct static_key_false perf_sched_events;
909 910
910static __always_inline bool 911static __always_inline bool
911perf_sw_migrate_enabled(void) 912perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
924static inline void perf_event_task_sched_in(struct task_struct *prev, 925static inline void perf_event_task_sched_in(struct task_struct *prev,
925 struct task_struct *task) 926 struct task_struct *task)
926{ 927{
927 if (static_key_false(&perf_sched_events.key)) 928 if (static_branch_unlikely(&perf_sched_events))
928 __perf_event_task_sched_in(prev, task); 929 __perf_event_task_sched_in(prev, task);
929 930
930 if (perf_sw_migrate_enabled() && task->sched_migrated) { 931 if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
941{ 942{
942 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); 943 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
943 944
944 if (static_key_false(&perf_sched_events.key)) 945 if (static_branch_unlikely(&perf_sched_events))
945 __perf_event_task_sched_out(prev, next); 946 __perf_event_task_sched_out(prev, next);
946} 947}
947 948
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 998d8f1c3c91..b50c0492629d 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,6 +49,7 @@ struct bq27xxx_reg_cache {
49 49
50struct bq27xxx_device_info { 50struct bq27xxx_device_info {
51 struct device *dev; 51 struct device *dev;
52 int id;
52 enum bq27xxx_chip chip; 53 enum bq27xxx_chip chip;
53 const char *name; 54 const char *name;
54 struct bq27xxx_access_methods bus; 55 struct bq27xxx_access_methods bus;
diff --git a/include/linux/random.h b/include/linux/random.h
index a75840c1aa71..9c29122037f9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
34#endif 34#endif
35 35
36unsigned int get_random_int(void); 36unsigned int get_random_int(void);
37unsigned long get_random_long(void);
37unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); 38unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
38 39
39u32 prandom_u32(void); 40u32 prandom_u32(void);
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 343c13ac4f71..35cb9264e0d5 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -44,6 +44,7 @@
44 44
45#define KNAV_DMA_NUM_EPIB_WORDS 4 45#define KNAV_DMA_NUM_EPIB_WORDS 4
46#define KNAV_DMA_NUM_PS_WORDS 16 46#define KNAV_DMA_NUM_PS_WORDS 16
47#define KNAV_DMA_NUM_SW_DATA_WORDS 4
47#define KNAV_DMA_FDQ_PER_CHAN 4 48#define KNAV_DMA_FDQ_PER_CHAN 4
48 49
49/* Tx channel scheduling priority */ 50/* Tx channel scheduling priority */
@@ -142,6 +143,7 @@ struct knav_dma_cfg {
142 * @orig_buff: buff pointer since 'buff' can be overwritten 143 * @orig_buff: buff pointer since 'buff' can be overwritten
143 * @epib: Extended packet info block 144 * @epib: Extended packet info block
144 * @psdata: Protocol specific 145 * @psdata: Protocol specific
146 * @sw_data: Software private data not touched by h/w
145 */ 147 */
146struct knav_dma_desc { 148struct knav_dma_desc {
147 __le32 desc_info; 149 __le32 desc_info;
@@ -154,7 +156,7 @@ struct knav_dma_desc {
154 __le32 orig_buff; 156 __le32 orig_buff;
155 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; 157 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
156 __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; 158 __le32 psdata[KNAV_DMA_NUM_PS_WORDS];
157 __le32 pad[4]; 159 u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS];
158} ____cacheline_aligned; 160} ____cacheline_aligned;
159 161
160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) 162#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 429fdfc3baf5..925730bc9fc1 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -568,6 +568,8 @@ enum {
568 FILTER_DYN_STRING, 568 FILTER_DYN_STRING,
569 FILTER_PTR_STRING, 569 FILTER_PTR_STRING,
570 FILTER_TRACE_FN, 570 FILTER_TRACE_FN,
571 FILTER_COMM,
572 FILTER_CPU,
571}; 573};
572 574
573extern int trace_event_raw_init(struct trace_event_call *call); 575extern int trace_event_raw_init(struct trace_event_call *call);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acd522a91539..acfdbf353a0b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
14 * See the file COPYING for more details. 14 * See the file COPYING for more details.
15 */ 15 */
16 16
17#include <linux/smp.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/cpumask.h>
19#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
20#include <linux/tracepoint-defs.h> 22#include <linux/tracepoint-defs.h>
21 23
@@ -132,6 +134,9 @@ extern void syscall_unregfunc(void);
132 void *it_func; \ 134 void *it_func; \
133 void *__data; \ 135 void *__data; \
134 \ 136 \
137 if (!cpu_online(raw_smp_processor_id())) \
138 return; \
139 \
135 if (!(cond)) \ 140 if (!(cond)) \
136 return; \ 141 return; \
137 prercu; \ 142 prercu; \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b333c945e571..d0b5ca5d4e08 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
198void wbc_detach_inode(struct writeback_control *wbc); 198void wbc_detach_inode(struct writeback_control *wbc);
199void wbc_account_io(struct writeback_control *wbc, struct page *page, 199void wbc_account_io(struct writeback_control *wbc, struct page *page,
200 size_t bytes); 200 size_t bytes);
201void cgroup_writeback_umount(void);
201 202
202/** 203/**
203 * inode_attach_wb - associate an inode with its wb 204 * inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
301{ 302{
302} 303}
303 304
305static inline void cgroup_writeback_umount(void)
306{
307}
308
304#endif /* CONFIG_CGROUP_WRITEBACK */ 309#endif /* CONFIG_CGROUP_WRITEBACK */
305 310
306/* 311/*
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 481fe1c9044c..49dcad4fe99e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
270 struct sock *newsk, 270 struct sock *newsk,
271 const struct request_sock *req); 271 const struct request_sock *req);
272 272
273void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, 273struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
274 struct sock *child); 274 struct request_sock *req,
275 struct sock *child);
275void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 276void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
276 unsigned long timeout); 277 unsigned long timeout);
277struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, 278struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7029527725dd..4079fc18ffe4 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -61,6 +61,7 @@ struct fib_nh_exception {
61 struct rtable __rcu *fnhe_rth_input; 61 struct rtable __rcu *fnhe_rth_input;
62 struct rtable __rcu *fnhe_rth_output; 62 struct rtable __rcu *fnhe_rth_output;
63 unsigned long fnhe_stamp; 63 unsigned long fnhe_stamp;
64 struct rcu_head rcu;
64}; 65};
65 66
66struct fnhe_hash_bucket { 67struct fnhe_hash_bucket {
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index e2b712c90d3f..c21c38ce7450 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -343,7 +343,7 @@ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
343void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); 343void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
344 344
345void snd_hdac_bus_update_rirb(struct hdac_bus *bus); 345void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
346void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, 346int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
347 void (*ack)(struct hdac_bus *, 347 void (*ack)(struct hdac_bus *,
348 struct hdac_stream *)); 348 struct hdac_stream *));
349 349
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 1e3c8cb43bd7..625b38f65764 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -66,27 +66,33 @@ struct media_device_info {
66/* 66/*
67 * DVB entities 67 * DVB entities
68 */ 68 */
69#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 1) 69#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001)
70#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 2) 70#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002)
71#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 3) 71#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003)
72#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 4) 72#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004)
73 73
74/* 74/*
75 * Connectors 75 * I/O entities
76 */ 76 */
77/* It is a responsibility of the entity drivers to add connectors and links */ 77#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001)
78#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 21) 78#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002)
79#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 22) 79#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003)
80#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 23)
81/* For internal test signal generators and other debug connectors */
82#define MEDIA_ENT_F_CONN_TEST (MEDIA_ENT_F_BASE + 24)
83 80
84/* 81/*
85 * I/O entities 82 * Connectors
86 */ 83 */
87#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 31) 84/* It is a responsibility of the entity drivers to add connectors and links */
88#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 32) 85#ifdef __KERNEL__
89#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 33) 86 /*
87 * For now, it should not be used in userspace, as some
88 * definitions may change
89 */
90
91#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 0x30001)
92#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 0x30002)
93#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 0x30003)
94
95#endif
90 96
91/* 97/*
92 * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and 98 * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
@@ -291,14 +297,14 @@ struct media_v2_entity {
291 __u32 id; 297 __u32 id;
292 char name[64]; /* FIXME: move to a property? (RFC says so) */ 298 char name[64]; /* FIXME: move to a property? (RFC says so) */
293 __u32 function; /* Main function of the entity */ 299 __u32 function; /* Main function of the entity */
294 __u16 reserved[12]; 300 __u32 reserved[6];
295}; 301} __attribute__ ((packed));
296 302
297/* Should match the specific fields at media_intf_devnode */ 303/* Should match the specific fields at media_intf_devnode */
298struct media_v2_intf_devnode { 304struct media_v2_intf_devnode {
299 __u32 major; 305 __u32 major;
300 __u32 minor; 306 __u32 minor;
301}; 307} __attribute__ ((packed));
302 308
303struct media_v2_interface { 309struct media_v2_interface {
304 __u32 id; 310 __u32 id;
@@ -310,22 +316,22 @@ struct media_v2_interface {
310 struct media_v2_intf_devnode devnode; 316 struct media_v2_intf_devnode devnode;
311 __u32 raw[16]; 317 __u32 raw[16];
312 }; 318 };
313}; 319} __attribute__ ((packed));
314 320
315struct media_v2_pad { 321struct media_v2_pad {
316 __u32 id; 322 __u32 id;
317 __u32 entity_id; 323 __u32 entity_id;
318 __u32 flags; 324 __u32 flags;
319 __u16 reserved[9]; 325 __u32 reserved[5];
320}; 326} __attribute__ ((packed));
321 327
322struct media_v2_link { 328struct media_v2_link {
323 __u32 id; 329 __u32 id;
324 __u32 source_id; 330 __u32 source_id;
325 __u32 sink_id; 331 __u32 sink_id;
326 __u32 flags; 332 __u32 flags;
327 __u32 reserved[5]; 333 __u32 reserved[6];
328}; 334} __attribute__ ((packed));
329 335
330struct media_v2_topology { 336struct media_v2_topology {
331 __u64 topology_version; 337 __u64 topology_version;
@@ -345,7 +351,7 @@ struct media_v2_topology {
345 __u32 num_links; 351 __u32 num_links;
346 __u32 reserved4; 352 __u32 reserved4;
347 __u64 ptr_links; 353 __u64 ptr_links;
348}; 354} __attribute__ ((packed));
349 355
350static inline void __user *media_get_uptr(__u64 arg) 356static inline void __user *media_get_uptr(__u64 arg)
351{ 357{
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 5b4a4be06e2b..cc68b92124d4 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -66,14 +66,18 @@ struct nd_cmd_ars_cap {
66 __u64 length; 66 __u64 length;
67 __u32 status; 67 __u32 status;
68 __u32 max_ars_out; 68 __u32 max_ars_out;
69 __u32 clear_err_unit;
70 __u32 reserved;
69} __packed; 71} __packed;
70 72
71struct nd_cmd_ars_start { 73struct nd_cmd_ars_start {
72 __u64 address; 74 __u64 address;
73 __u64 length; 75 __u64 length;
74 __u16 type; 76 __u16 type;
75 __u8 reserved[6]; 77 __u8 flags;
78 __u8 reserved[5];
76 __u32 status; 79 __u32 status;
80 __u32 scrub_time;
77} __packed; 81} __packed;
78 82
79struct nd_cmd_ars_status { 83struct nd_cmd_ars_status {
@@ -81,11 +85,14 @@ struct nd_cmd_ars_status {
81 __u32 out_length; 85 __u32 out_length;
82 __u64 address; 86 __u64 address;
83 __u64 length; 87 __u64 length;
88 __u64 restart_address;
89 __u64 restart_length;
84 __u16 type; 90 __u16 type;
91 __u16 flags;
85 __u32 num_records; 92 __u32 num_records;
86 struct nd_ars_record { 93 struct nd_ars_record {
87 __u32 handle; 94 __u32 handle;
88 __u32 flags; 95 __u32 reserved;
89 __u64 err_address; 96 __u64 err_address;
90 __u64 length; 97 __u64 length;
91 } __packed records[0]; 98 } __packed records[0];
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d0f277..331fc1b0b3c7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); 156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
157 157
158 /* 158 /*
159 * We raced in the idr lookup or with shm_destroy(). Either way, the 159 * Callers of shm_lock() must validate the status of the returned ipc
160 * ID is busted. 160 * object pointer (as returned by ipc_lock()), and error out as
161 * appropriate.
161 */ 162 */
162 WARN_ON(IS_ERR(ipcp)); 163 if (IS_ERR(ipcp))
163 164 return (void *)ipcp;
164 return container_of(ipcp, struct shmid_kernel, shm_perm); 165 return container_of(ipcp, struct shmid_kernel, shm_perm);
165} 166}
166 167
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
186} 187}
187 188
188 189
189/* This is called by fork, once for every shm attach. */ 190static int __shm_open(struct vm_area_struct *vma)
190static void shm_open(struct vm_area_struct *vma)
191{ 191{
192 struct file *file = vma->vm_file; 192 struct file *file = vma->vm_file;
193 struct shm_file_data *sfd = shm_file_data(file); 193 struct shm_file_data *sfd = shm_file_data(file);
194 struct shmid_kernel *shp; 194 struct shmid_kernel *shp;
195 195
196 shp = shm_lock(sfd->ns, sfd->id); 196 shp = shm_lock(sfd->ns, sfd->id);
197
198 if (IS_ERR(shp))
199 return PTR_ERR(shp);
200
197 shp->shm_atim = get_seconds(); 201 shp->shm_atim = get_seconds();
198 shp->shm_lprid = task_tgid_vnr(current); 202 shp->shm_lprid = task_tgid_vnr(current);
199 shp->shm_nattch++; 203 shp->shm_nattch++;
200 shm_unlock(shp); 204 shm_unlock(shp);
205 return 0;
206}
207
208/* This is called by fork, once for every shm attach. */
209static void shm_open(struct vm_area_struct *vma)
210{
211 int err = __shm_open(vma);
212 /*
213 * We raced in the idr lookup or with shm_destroy().
214 * Either way, the ID is busted.
215 */
216 WARN_ON_ONCE(err);
201} 217}
202 218
203/* 219/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
260 down_write(&shm_ids(ns).rwsem); 276 down_write(&shm_ids(ns).rwsem);
261 /* remove from the list of attaches of the shm segment */ 277 /* remove from the list of attaches of the shm segment */
262 shp = shm_lock(ns, sfd->id); 278 shp = shm_lock(ns, sfd->id);
279
280 /*
281 * We raced in the idr lookup or with shm_destroy().
282 * Either way, the ID is busted.
283 */
284 if (WARN_ON_ONCE(IS_ERR(shp)))
285 goto done; /* no-op */
286
263 shp->shm_lprid = task_tgid_vnr(current); 287 shp->shm_lprid = task_tgid_vnr(current);
264 shp->shm_dtim = get_seconds(); 288 shp->shm_dtim = get_seconds();
265 shp->shm_nattch--; 289 shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
267 shm_destroy(ns, shp); 291 shm_destroy(ns, shp);
268 else 292 else
269 shm_unlock(shp); 293 shm_unlock(shp);
294done:
270 up_write(&shm_ids(ns).rwsem); 295 up_write(&shm_ids(ns).rwsem);
271} 296}
272 297
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
388 struct shm_file_data *sfd = shm_file_data(file); 413 struct shm_file_data *sfd = shm_file_data(file);
389 int ret; 414 int ret;
390 415
416 /*
417 * In case of remap_file_pages() emulation, the file can represent
418 * removed IPC ID: propogate shm_lock() error to caller.
419 */
420 ret =__shm_open(vma);
421 if (ret)
422 return ret;
423
391 ret = sfd->file->f_op->mmap(sfd->file, vma); 424 ret = sfd->file->f_op->mmap(sfd->file, vma);
392 if (ret != 0) 425 if (ret) {
426 shm_close(vma);
393 return ret; 427 return ret;
428 }
394 sfd->vm_ops = vma->vm_ops; 429 sfd->vm_ops = vma->vm_ops;
395#ifdef CONFIG_MMU 430#ifdef CONFIG_MMU
396 WARN_ON(!sfd->vm_ops->fault); 431 WARN_ON(!sfd->vm_ops->fault);
397#endif 432#endif
398 vma->vm_ops = &shm_vm_ops; 433 vma->vm_ops = &shm_vm_ops;
399 shm_open(vma); 434 return 0;
400
401 return ret;
402} 435}
403 436
404static int shm_release(struct inode *ino, struct file *file) 437static int shm_release(struct inode *ino, struct file *file)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5946460b2425..614614821f00 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -64,8 +64,17 @@ static void remote_function(void *data)
64 struct task_struct *p = tfc->p; 64 struct task_struct *p = tfc->p;
65 65
66 if (p) { 66 if (p) {
67 tfc->ret = -EAGAIN; 67 /* -EAGAIN */
68 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 68 if (task_cpu(p) != smp_processor_id())
69 return;
70
71 /*
72 * Now that we're on right CPU with IRQs disabled, we can test
73 * if we hit the right task without races.
74 */
75
76 tfc->ret = -ESRCH; /* No such (running) process */
77 if (p != current)
69 return; 78 return;
70 } 79 }
71 80
@@ -92,13 +101,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
92 .p = p, 101 .p = p,
93 .func = func, 102 .func = func,
94 .info = info, 103 .info = info,
95 .ret = -ESRCH, /* No such (running) process */ 104 .ret = -EAGAIN,
96 }; 105 };
106 int ret;
97 107
98 if (task_curr(p)) 108 do {
99 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 109 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
110 if (!ret)
111 ret = data.ret;
112 } while (ret == -EAGAIN);
100 113
101 return data.ret; 114 return ret;
102} 115}
103 116
104/** 117/**
@@ -169,19 +182,6 @@ static bool is_kernel_event(struct perf_event *event)
169 * rely on ctx->is_active and therefore cannot use event_function_call(). 182 * rely on ctx->is_active and therefore cannot use event_function_call().
170 * See perf_install_in_context(). 183 * See perf_install_in_context().
171 * 184 *
172 * This is because we need a ctx->lock serialized variable (ctx->is_active)
173 * to reliably determine if a particular task/context is scheduled in. The
174 * task_curr() use in task_function_call() is racy in that a remote context
175 * switch is not a single atomic operation.
176 *
177 * As is, the situation is 'safe' because we set rq->curr before we do the
178 * actual context switch. This means that task_curr() will fail early, but
179 * we'll continue spinning on ctx->is_active until we've passed
180 * perf_event_task_sched_out().
181 *
182 * Without this ctx->lock serialized variable we could have race where we find
183 * the task (and hence the context) would not be active while in fact they are.
184 *
185 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set. 185 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
186 */ 186 */
187 187
@@ -212,7 +212,7 @@ static int event_function(void *info)
212 */ 212 */
213 if (ctx->task) { 213 if (ctx->task) {
214 if (ctx->task != current) { 214 if (ctx->task != current) {
215 ret = -EAGAIN; 215 ret = -ESRCH;
216 goto unlock; 216 goto unlock;
217 } 217 }
218 218
@@ -276,10 +276,10 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
276 return; 276 return;
277 } 277 }
278 278
279again:
280 if (task == TASK_TOMBSTONE) 279 if (task == TASK_TOMBSTONE)
281 return; 280 return;
282 281
282again:
283 if (!task_function_call(task, event_function, &efs)) 283 if (!task_function_call(task, event_function, &efs))
284 return; 284 return;
285 285
@@ -289,13 +289,15 @@ again:
289 * a concurrent perf_event_context_sched_out(). 289 * a concurrent perf_event_context_sched_out().
290 */ 290 */
291 task = ctx->task; 291 task = ctx->task;
292 if (task != TASK_TOMBSTONE) { 292 if (task == TASK_TOMBSTONE) {
293 if (ctx->is_active) { 293 raw_spin_unlock_irq(&ctx->lock);
294 raw_spin_unlock_irq(&ctx->lock); 294 return;
295 goto again;
296 }
297 func(event, NULL, ctx, data);
298 } 295 }
296 if (ctx->is_active) {
297 raw_spin_unlock_irq(&ctx->lock);
298 goto again;
299 }
300 func(event, NULL, ctx, data);
299 raw_spin_unlock_irq(&ctx->lock); 301 raw_spin_unlock_irq(&ctx->lock);
300} 302}
301 303
@@ -314,6 +316,7 @@ again:
314enum event_type_t { 316enum event_type_t {
315 EVENT_FLEXIBLE = 0x1, 317 EVENT_FLEXIBLE = 0x1,
316 EVENT_PINNED = 0x2, 318 EVENT_PINNED = 0x2,
319 EVENT_TIME = 0x4,
317 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 320 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
318}; 321};
319 322
@@ -321,7 +324,13 @@ enum event_type_t {
321 * perf_sched_events : >0 events exist 324 * perf_sched_events : >0 events exist
322 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 325 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
323 */ 326 */
324struct static_key_deferred perf_sched_events __read_mostly; 327
328static void perf_sched_delayed(struct work_struct *work);
329DEFINE_STATIC_KEY_FALSE(perf_sched_events);
330static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
331static DEFINE_MUTEX(perf_sched_mutex);
332static atomic_t perf_sched_count;
333
325static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 334static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
326static DEFINE_PER_CPU(int, perf_sched_cb_usages); 335static DEFINE_PER_CPU(int, perf_sched_cb_usages);
327 336
@@ -1288,16 +1297,18 @@ static u64 perf_event_time(struct perf_event *event)
1288 1297
1289/* 1298/*
1290 * Update the total_time_enabled and total_time_running fields for a event. 1299 * Update the total_time_enabled and total_time_running fields for a event.
1291 * The caller of this function needs to hold the ctx->lock.
1292 */ 1300 */
1293static void update_event_times(struct perf_event *event) 1301static void update_event_times(struct perf_event *event)
1294{ 1302{
1295 struct perf_event_context *ctx = event->ctx; 1303 struct perf_event_context *ctx = event->ctx;
1296 u64 run_end; 1304 u64 run_end;
1297 1305
1306 lockdep_assert_held(&ctx->lock);
1307
1298 if (event->state < PERF_EVENT_STATE_INACTIVE || 1308 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1299 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1309 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1300 return; 1310 return;
1311
1301 /* 1312 /*
1302 * in cgroup mode, time_enabled represents 1313 * in cgroup mode, time_enabled represents
1303 * the time the event was enabled AND active 1314 * the time the event was enabled AND active
@@ -1645,7 +1656,7 @@ out:
1645 1656
1646static bool is_orphaned_event(struct perf_event *event) 1657static bool is_orphaned_event(struct perf_event *event)
1647{ 1658{
1648 return event->state == PERF_EVENT_STATE_EXIT; 1659 return event->state == PERF_EVENT_STATE_DEAD;
1649} 1660}
1650 1661
1651static inline int pmu_filter_match(struct perf_event *event) 1662static inline int pmu_filter_match(struct perf_event *event)
@@ -1690,14 +1701,14 @@ event_sched_out(struct perf_event *event,
1690 1701
1691 perf_pmu_disable(event->pmu); 1702 perf_pmu_disable(event->pmu);
1692 1703
1704 event->tstamp_stopped = tstamp;
1705 event->pmu->del(event, 0);
1706 event->oncpu = -1;
1693 event->state = PERF_EVENT_STATE_INACTIVE; 1707 event->state = PERF_EVENT_STATE_INACTIVE;
1694 if (event->pending_disable) { 1708 if (event->pending_disable) {
1695 event->pending_disable = 0; 1709 event->pending_disable = 0;
1696 event->state = PERF_EVENT_STATE_OFF; 1710 event->state = PERF_EVENT_STATE_OFF;
1697 } 1711 }
1698 event->tstamp_stopped = tstamp;
1699 event->pmu->del(event, 0);
1700 event->oncpu = -1;
1701 1712
1702 if (!is_software_event(event)) 1713 if (!is_software_event(event))
1703 cpuctx->active_oncpu--; 1714 cpuctx->active_oncpu--;
@@ -1732,7 +1743,6 @@ group_sched_out(struct perf_event *group_event,
1732} 1743}
1733 1744
1734#define DETACH_GROUP 0x01UL 1745#define DETACH_GROUP 0x01UL
1735#define DETACH_STATE 0x02UL
1736 1746
1737/* 1747/*
1738 * Cross CPU call to remove a performance event 1748 * Cross CPU call to remove a performance event
@@ -1752,8 +1762,6 @@ __perf_remove_from_context(struct perf_event *event,
1752 if (flags & DETACH_GROUP) 1762 if (flags & DETACH_GROUP)
1753 perf_group_detach(event); 1763 perf_group_detach(event);
1754 list_del_event(event, ctx); 1764 list_del_event(event, ctx);
1755 if (flags & DETACH_STATE)
1756 event->state = PERF_EVENT_STATE_EXIT;
1757 1765
1758 if (!ctx->nr_events && ctx->is_active) { 1766 if (!ctx->nr_events && ctx->is_active) {
1759 ctx->is_active = 0; 1767 ctx->is_active = 0;
@@ -2063,14 +2071,27 @@ static void add_event_to_ctx(struct perf_event *event,
2063 event->tstamp_stopped = tstamp; 2071 event->tstamp_stopped = tstamp;
2064} 2072}
2065 2073
2066static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, 2074static void ctx_sched_out(struct perf_event_context *ctx,
2067 struct perf_event_context *ctx); 2075 struct perf_cpu_context *cpuctx,
2076 enum event_type_t event_type);
2068static void 2077static void
2069ctx_sched_in(struct perf_event_context *ctx, 2078ctx_sched_in(struct perf_event_context *ctx,
2070 struct perf_cpu_context *cpuctx, 2079 struct perf_cpu_context *cpuctx,
2071 enum event_type_t event_type, 2080 enum event_type_t event_type,
2072 struct task_struct *task); 2081 struct task_struct *task);
2073 2082
2083static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2084 struct perf_event_context *ctx)
2085{
2086 if (!cpuctx->task_ctx)
2087 return;
2088
2089 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2090 return;
2091
2092 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2093}
2094
2074static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2095static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2075 struct perf_event_context *ctx, 2096 struct perf_event_context *ctx,
2076 struct task_struct *task) 2097 struct task_struct *task)
@@ -2097,49 +2118,68 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
2097/* 2118/*
2098 * Cross CPU call to install and enable a performance event 2119 * Cross CPU call to install and enable a performance event
2099 * 2120 *
2100 * Must be called with ctx->mutex held 2121 * Very similar to remote_function() + event_function() but cannot assume that
2122 * things like ctx->is_active and cpuctx->task_ctx are set.
2101 */ 2123 */
2102static int __perf_install_in_context(void *info) 2124static int __perf_install_in_context(void *info)
2103{ 2125{
2104 struct perf_event_context *ctx = info; 2126 struct perf_event *event = info;
2127 struct perf_event_context *ctx = event->ctx;
2105 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2128 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2106 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2129 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2130 bool activate = true;
2131 int ret = 0;
2107 2132
2108 raw_spin_lock(&cpuctx->ctx.lock); 2133 raw_spin_lock(&cpuctx->ctx.lock);
2109 if (ctx->task) { 2134 if (ctx->task) {
2110 raw_spin_lock(&ctx->lock); 2135 raw_spin_lock(&ctx->lock);
2111 /*
2112 * If we hit the 'wrong' task, we've since scheduled and
2113 * everything should be sorted, nothing to do!
2114 */
2115 task_ctx = ctx; 2136 task_ctx = ctx;
2116 if (ctx->task != current) 2137
2138 /* If we're on the wrong CPU, try again */
2139 if (task_cpu(ctx->task) != smp_processor_id()) {
2140 ret = -ESRCH;
2117 goto unlock; 2141 goto unlock;
2142 }
2118 2143
2119 /* 2144 /*
2120 * If task_ctx is set, it had better be to us. 2145 * If we're on the right CPU, see if the task we target is
2146 * current, if not we don't have to activate the ctx, a future
2147 * context switch will do that for us.
2121 */ 2148 */
2122 WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx); 2149 if (ctx->task != current)
2150 activate = false;
2151 else
2152 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2153
2123 } else if (task_ctx) { 2154 } else if (task_ctx) {
2124 raw_spin_lock(&task_ctx->lock); 2155 raw_spin_lock(&task_ctx->lock);
2125 } 2156 }
2126 2157
2127 ctx_resched(cpuctx, task_ctx); 2158 if (activate) {
2159 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2160 add_event_to_ctx(event, ctx);
2161 ctx_resched(cpuctx, task_ctx);
2162 } else {
2163 add_event_to_ctx(event, ctx);
2164 }
2165
2128unlock: 2166unlock:
2129 perf_ctx_unlock(cpuctx, task_ctx); 2167 perf_ctx_unlock(cpuctx, task_ctx);
2130 2168
2131 return 0; 2169 return ret;
2132} 2170}
2133 2171
2134/* 2172/*
2135 * Attach a performance event to a context 2173 * Attach a performance event to a context.
2174 *
2175 * Very similar to event_function_call, see comment there.
2136 */ 2176 */
2137static void 2177static void
2138perf_install_in_context(struct perf_event_context *ctx, 2178perf_install_in_context(struct perf_event_context *ctx,
2139 struct perf_event *event, 2179 struct perf_event *event,
2140 int cpu) 2180 int cpu)
2141{ 2181{
2142 struct task_struct *task = NULL; 2182 struct task_struct *task = READ_ONCE(ctx->task);
2143 2183
2144 lockdep_assert_held(&ctx->mutex); 2184 lockdep_assert_held(&ctx->mutex);
2145 2185
@@ -2147,40 +2187,46 @@ perf_install_in_context(struct perf_event_context *ctx,
2147 if (event->cpu != -1) 2187 if (event->cpu != -1)
2148 event->cpu = cpu; 2188 event->cpu = cpu;
2149 2189
2190 if (!task) {
2191 cpu_function_call(cpu, __perf_install_in_context, event);
2192 return;
2193 }
2194
2195 /*
2196 * Should not happen, we validate the ctx is still alive before calling.
2197 */
2198 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2199 return;
2200
2150 /* 2201 /*
2151 * Installing events is tricky because we cannot rely on ctx->is_active 2202 * Installing events is tricky because we cannot rely on ctx->is_active
2152 * to be set in case this is the nr_events 0 -> 1 transition. 2203 * to be set in case this is the nr_events 0 -> 1 transition.
2153 *
2154 * So what we do is we add the event to the list here, which will allow
2155 * a future context switch to DTRT and then send a racy IPI. If the IPI
2156 * fails to hit the right task, this means a context switch must have
2157 * happened and that will have taken care of business.
2158 */ 2204 */
2159 raw_spin_lock_irq(&ctx->lock); 2205again:
2160 task = ctx->task;
2161 /* 2206 /*
2162 * Worse, we cannot even rely on the ctx actually existing anymore. If 2207 * Cannot use task_function_call() because we need to run on the task's
2163 * between find_get_context() and perf_install_in_context() the task 2208 * CPU regardless of whether its current or not.
2164 * went through perf_event_exit_task() its dead and we should not be
2165 * adding new events.
2166 */ 2209 */
2167 if (task == TASK_TOMBSTONE) { 2210 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2211 return;
2212
2213 raw_spin_lock_irq(&ctx->lock);
2214 task = ctx->task;
2215 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2216 /*
2217 * Cannot happen because we already checked above (which also
2218 * cannot happen), and we hold ctx->mutex, which serializes us
2219 * against perf_event_exit_task_context().
2220 */
2168 raw_spin_unlock_irq(&ctx->lock); 2221 raw_spin_unlock_irq(&ctx->lock);
2169 return; 2222 return;
2170 } 2223 }
2171 update_context_time(ctx); 2224 raw_spin_unlock_irq(&ctx->lock);
2172 /* 2225 /*
2173 * Update cgrp time only if current cgrp matches event->cgrp. 2226 * Since !ctx->is_active doesn't mean anything, we must IPI
2174 * Must be done before calling add_event_to_ctx(). 2227 * unconditionally.
2175 */ 2228 */
2176 update_cgrp_time_from_event(event); 2229 goto again;
2177 add_event_to_ctx(event, ctx);
2178 raw_spin_unlock_irq(&ctx->lock);
2179
2180 if (task)
2181 task_function_call(task, __perf_install_in_context, ctx);
2182 else
2183 cpu_function_call(cpu, __perf_install_in_context, ctx);
2184} 2230}
2185 2231
2186/* 2232/*
@@ -2219,17 +2265,18 @@ static void __perf_event_enable(struct perf_event *event,
2219 event->state <= PERF_EVENT_STATE_ERROR) 2265 event->state <= PERF_EVENT_STATE_ERROR)
2220 return; 2266 return;
2221 2267
2222 update_context_time(ctx); 2268 if (ctx->is_active)
2269 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2270
2223 __perf_event_mark_enabled(event); 2271 __perf_event_mark_enabled(event);
2224 2272
2225 if (!ctx->is_active) 2273 if (!ctx->is_active)
2226 return; 2274 return;
2227 2275
2228 if (!event_filter_match(event)) { 2276 if (!event_filter_match(event)) {
2229 if (is_cgroup_event(event)) { 2277 if (is_cgroup_event(event))
2230 perf_cgroup_set_timestamp(current, ctx); // XXX ?
2231 perf_cgroup_defer_enabled(event); 2278 perf_cgroup_defer_enabled(event);
2232 } 2279 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2233 return; 2280 return;
2234 } 2281 }
2235 2282
@@ -2237,8 +2284,10 @@ static void __perf_event_enable(struct perf_event *event,
2237 * If the event is in a group and isn't the group leader, 2284 * If the event is in a group and isn't the group leader,
2238 * then don't put it on unless the group is on. 2285 * then don't put it on unless the group is on.
2239 */ 2286 */
2240 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2287 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2288 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2241 return; 2289 return;
2290 }
2242 2291
2243 task_ctx = cpuctx->task_ctx; 2292 task_ctx = cpuctx->task_ctx;
2244 if (ctx->task) 2293 if (ctx->task)
@@ -2344,24 +2393,33 @@ static void ctx_sched_out(struct perf_event_context *ctx,
2344 } 2393 }
2345 2394
2346 ctx->is_active &= ~event_type; 2395 ctx->is_active &= ~event_type;
2396 if (!(ctx->is_active & EVENT_ALL))
2397 ctx->is_active = 0;
2398
2347 if (ctx->task) { 2399 if (ctx->task) {
2348 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 2400 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2349 if (!ctx->is_active) 2401 if (!ctx->is_active)
2350 cpuctx->task_ctx = NULL; 2402 cpuctx->task_ctx = NULL;
2351 } 2403 }
2352 2404
2353 update_context_time(ctx); 2405 is_active ^= ctx->is_active; /* changed bits */
2354 update_cgrp_time_from_cpuctx(cpuctx); 2406
2355 if (!ctx->nr_active) 2407 if (is_active & EVENT_TIME) {
2408 /* update (and stop) ctx time */
2409 update_context_time(ctx);
2410 update_cgrp_time_from_cpuctx(cpuctx);
2411 }
2412
2413 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2356 return; 2414 return;
2357 2415
2358 perf_pmu_disable(ctx->pmu); 2416 perf_pmu_disable(ctx->pmu);
2359 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2417 if (is_active & EVENT_PINNED) {
2360 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2418 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2361 group_sched_out(event, cpuctx, ctx); 2419 group_sched_out(event, cpuctx, ctx);
2362 } 2420 }
2363 2421
2364 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2422 if (is_active & EVENT_FLEXIBLE) {
2365 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2423 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2366 group_sched_out(event, cpuctx, ctx); 2424 group_sched_out(event, cpuctx, ctx);
2367 } 2425 }
@@ -2641,18 +2699,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
2641 perf_cgroup_sched_out(task, next); 2699 perf_cgroup_sched_out(task, next);
2642} 2700}
2643 2701
2644static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2645 struct perf_event_context *ctx)
2646{
2647 if (!cpuctx->task_ctx)
2648 return;
2649
2650 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2651 return;
2652
2653 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2654}
2655
2656/* 2702/*
2657 * Called with IRQs disabled 2703 * Called with IRQs disabled
2658 */ 2704 */
@@ -2735,7 +2781,7 @@ ctx_sched_in(struct perf_event_context *ctx,
2735 if (likely(!ctx->nr_events)) 2781 if (likely(!ctx->nr_events))
2736 return; 2782 return;
2737 2783
2738 ctx->is_active |= event_type; 2784 ctx->is_active |= (event_type | EVENT_TIME);
2739 if (ctx->task) { 2785 if (ctx->task) {
2740 if (!is_active) 2786 if (!is_active)
2741 cpuctx->task_ctx = ctx; 2787 cpuctx->task_ctx = ctx;
@@ -2743,18 +2789,24 @@ ctx_sched_in(struct perf_event_context *ctx,
2743 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 2789 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2744 } 2790 }
2745 2791
2746 now = perf_clock(); 2792 is_active ^= ctx->is_active; /* changed bits */
2747 ctx->timestamp = now; 2793
2748 perf_cgroup_set_timestamp(task, ctx); 2794 if (is_active & EVENT_TIME) {
2795 /* start ctx time */
2796 now = perf_clock();
2797 ctx->timestamp = now;
2798 perf_cgroup_set_timestamp(task, ctx);
2799 }
2800
2749 /* 2801 /*
2750 * First go through the list and put on any pinned groups 2802 * First go through the list and put on any pinned groups
2751 * in order to give them the best chance of going on. 2803 * in order to give them the best chance of going on.
2752 */ 2804 */
2753 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2805 if (is_active & EVENT_PINNED)
2754 ctx_pinned_sched_in(ctx, cpuctx); 2806 ctx_pinned_sched_in(ctx, cpuctx);
2755 2807
2756 /* Then walk through the lower prio flexible groups */ 2808 /* Then walk through the lower prio flexible groups */
2757 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2809 if (is_active & EVENT_FLEXIBLE)
2758 ctx_flexible_sched_in(ctx, cpuctx); 2810 ctx_flexible_sched_in(ctx, cpuctx);
2759} 2811}
2760 2812
@@ -3120,6 +3172,7 @@ static void perf_event_enable_on_exec(int ctxn)
3120 3172
3121 cpuctx = __get_cpu_context(ctx); 3173 cpuctx = __get_cpu_context(ctx);
3122 perf_ctx_lock(cpuctx, ctx); 3174 perf_ctx_lock(cpuctx, ctx);
3175 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3123 list_for_each_entry(event, &ctx->event_list, event_entry) 3176 list_for_each_entry(event, &ctx->event_list, event_entry)
3124 enabled |= event_enable_on_exec(event, ctx); 3177 enabled |= event_enable_on_exec(event, ctx);
3125 3178
@@ -3537,12 +3590,22 @@ static void unaccount_event(struct perf_event *event)
3537 if (has_branch_stack(event)) 3590 if (has_branch_stack(event))
3538 dec = true; 3591 dec = true;
3539 3592
3540 if (dec) 3593 if (dec) {
3541 static_key_slow_dec_deferred(&perf_sched_events); 3594 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3595 schedule_delayed_work(&perf_sched_work, HZ);
3596 }
3542 3597
3543 unaccount_event_cpu(event, event->cpu); 3598 unaccount_event_cpu(event, event->cpu);
3544} 3599}
3545 3600
3601static void perf_sched_delayed(struct work_struct *work)
3602{
3603 mutex_lock(&perf_sched_mutex);
3604 if (atomic_dec_and_test(&perf_sched_count))
3605 static_branch_disable(&perf_sched_events);
3606 mutex_unlock(&perf_sched_mutex);
3607}
3608
3546/* 3609/*
3547 * The following implement mutual exclusion of events on "exclusive" pmus 3610 * The following implement mutual exclusion of events on "exclusive" pmus
3548 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3611 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -3752,30 +3815,42 @@ static void put_event(struct perf_event *event)
3752 */ 3815 */
3753int perf_event_release_kernel(struct perf_event *event) 3816int perf_event_release_kernel(struct perf_event *event)
3754{ 3817{
3755 struct perf_event_context *ctx; 3818 struct perf_event_context *ctx = event->ctx;
3756 struct perf_event *child, *tmp; 3819 struct perf_event *child, *tmp;
3757 3820
3821 /*
3822 * If we got here through err_file: fput(event_file); we will not have
3823 * attached to a context yet.
3824 */
3825 if (!ctx) {
3826 WARN_ON_ONCE(event->attach_state &
3827 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
3828 goto no_ctx;
3829 }
3830
3758 if (!is_kernel_event(event)) 3831 if (!is_kernel_event(event))
3759 perf_remove_from_owner(event); 3832 perf_remove_from_owner(event);
3760 3833
3761 ctx = perf_event_ctx_lock(event); 3834 ctx = perf_event_ctx_lock(event);
3762 WARN_ON_ONCE(ctx->parent_ctx); 3835 WARN_ON_ONCE(ctx->parent_ctx);
3763 perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE); 3836 perf_remove_from_context(event, DETACH_GROUP);
3764 perf_event_ctx_unlock(event, ctx);
3765 3837
3838 raw_spin_lock_irq(&ctx->lock);
3766 /* 3839 /*
3767 * At this point we must have event->state == PERF_EVENT_STATE_EXIT, 3840 * Mark this even as STATE_DEAD, there is no external reference to it
3768 * either from the above perf_remove_from_context() or through 3841 * anymore.
3769 * perf_event_exit_event().
3770 * 3842 *
3771 * Therefore, anybody acquiring event->child_mutex after the below 3843 * Anybody acquiring event->child_mutex after the below loop _must_
3772 * loop _must_ also see this, most importantly inherit_event() which 3844 * also see this, most importantly inherit_event() which will avoid
3773 * will avoid placing more children on the list. 3845 * placing more children on the list.
3774 * 3846 *
3775 * Thus this guarantees that we will in fact observe and kill _ALL_ 3847 * Thus this guarantees that we will in fact observe and kill _ALL_
3776 * child events. 3848 * child events.
3777 */ 3849 */
3778 WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT); 3850 event->state = PERF_EVENT_STATE_DEAD;
3851 raw_spin_unlock_irq(&ctx->lock);
3852
3853 perf_event_ctx_unlock(event, ctx);
3779 3854
3780again: 3855again:
3781 mutex_lock(&event->child_mutex); 3856 mutex_lock(&event->child_mutex);
@@ -3830,8 +3905,8 @@ again:
3830 } 3905 }
3831 mutex_unlock(&event->child_mutex); 3906 mutex_unlock(&event->child_mutex);
3832 3907
3833 /* Must be the last reference */ 3908no_ctx:
3834 put_event(event); 3909 put_event(event); /* Must be the 'last' reference */
3835 return 0; 3910 return 0;
3836} 3911}
3837EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3912EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -3988,7 +4063,7 @@ static bool is_event_hup(struct perf_event *event)
3988{ 4063{
3989 bool no_children; 4064 bool no_children;
3990 4065
3991 if (event->state != PERF_EVENT_STATE_EXIT) 4066 if (event->state > PERF_EVENT_STATE_EXIT)
3992 return false; 4067 return false;
3993 4068
3994 mutex_lock(&event->child_mutex); 4069 mutex_lock(&event->child_mutex);
@@ -7769,8 +7844,28 @@ static void account_event(struct perf_event *event)
7769 if (is_cgroup_event(event)) 7844 if (is_cgroup_event(event))
7770 inc = true; 7845 inc = true;
7771 7846
7772 if (inc) 7847 if (inc) {
7773 static_key_slow_inc(&perf_sched_events.key); 7848 if (atomic_inc_not_zero(&perf_sched_count))
7849 goto enabled;
7850
7851 mutex_lock(&perf_sched_mutex);
7852 if (!atomic_read(&perf_sched_count)) {
7853 static_branch_enable(&perf_sched_events);
7854 /*
7855 * Guarantee that all CPUs observe they key change and
7856 * call the perf scheduling hooks before proceeding to
7857 * install events that need them.
7858 */
7859 synchronize_sched();
7860 }
7861 /*
7862 * Now that we have waited for the sync_sched(), allow further
7863 * increments to by-pass the mutex.
7864 */
7865 atomic_inc(&perf_sched_count);
7866 mutex_unlock(&perf_sched_mutex);
7867 }
7868enabled:
7774 7869
7775 account_event_cpu(event, event->cpu); 7870 account_event_cpu(event, event->cpu);
7776} 7871}
@@ -8389,10 +8484,19 @@ SYSCALL_DEFINE5(perf_event_open,
8389 if (move_group) { 8484 if (move_group) {
8390 gctx = group_leader->ctx; 8485 gctx = group_leader->ctx;
8391 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8486 mutex_lock_double(&gctx->mutex, &ctx->mutex);
8487 if (gctx->task == TASK_TOMBSTONE) {
8488 err = -ESRCH;
8489 goto err_locked;
8490 }
8392 } else { 8491 } else {
8393 mutex_lock(&ctx->mutex); 8492 mutex_lock(&ctx->mutex);
8394 } 8493 }
8395 8494
8495 if (ctx->task == TASK_TOMBSTONE) {
8496 err = -ESRCH;
8497 goto err_locked;
8498 }
8499
8396 if (!perf_event_validate_size(event)) { 8500 if (!perf_event_validate_size(event)) {
8397 err = -E2BIG; 8501 err = -E2BIG;
8398 goto err_locked; 8502 goto err_locked;
@@ -8509,7 +8613,12 @@ err_context:
8509 perf_unpin_context(ctx); 8613 perf_unpin_context(ctx);
8510 put_ctx(ctx); 8614 put_ctx(ctx);
8511err_alloc: 8615err_alloc:
8512 free_event(event); 8616 /*
8617 * If event_file is set, the fput() above will have called ->release()
8618 * and that will take care of freeing the event.
8619 */
8620 if (!event_file)
8621 free_event(event);
8513err_cpus: 8622err_cpus:
8514 put_online_cpus(); 8623 put_online_cpus();
8515err_task: 8624err_task:
@@ -8563,12 +8672,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8563 8672
8564 WARN_ON_ONCE(ctx->parent_ctx); 8673 WARN_ON_ONCE(ctx->parent_ctx);
8565 mutex_lock(&ctx->mutex); 8674 mutex_lock(&ctx->mutex);
8675 if (ctx->task == TASK_TOMBSTONE) {
8676 err = -ESRCH;
8677 goto err_unlock;
8678 }
8679
8566 if (!exclusive_event_installable(event, ctx)) { 8680 if (!exclusive_event_installable(event, ctx)) {
8567 mutex_unlock(&ctx->mutex);
8568 perf_unpin_context(ctx);
8569 put_ctx(ctx);
8570 err = -EBUSY; 8681 err = -EBUSY;
8571 goto err_free; 8682 goto err_unlock;
8572 } 8683 }
8573 8684
8574 perf_install_in_context(ctx, event, cpu); 8685 perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8688,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8577 8688
8578 return event; 8689 return event;
8579 8690
8691err_unlock:
8692 mutex_unlock(&ctx->mutex);
8693 perf_unpin_context(ctx);
8694 put_ctx(ctx);
8580err_free: 8695err_free:
8581 free_event(event); 8696 free_event(event);
8582err: 8697err:
@@ -8695,7 +8810,7 @@ perf_event_exit_event(struct perf_event *child_event,
8695 if (parent_event) 8810 if (parent_event)
8696 perf_group_detach(child_event); 8811 perf_group_detach(child_event);
8697 list_del_event(child_event, child_ctx); 8812 list_del_event(child_event, child_ctx);
8698 child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */ 8813 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
8699 raw_spin_unlock_irq(&child_ctx->lock); 8814 raw_spin_unlock_irq(&child_ctx->lock);
8700 8815
8701 /* 8816 /*
@@ -9206,7 +9321,7 @@ static void perf_event_init_cpu(int cpu)
9206 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9321 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9207 9322
9208 mutex_lock(&swhash->hlist_mutex); 9323 mutex_lock(&swhash->hlist_mutex);
9209 if (swhash->hlist_refcount > 0) { 9324 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
9210 struct swevent_hlist *hlist; 9325 struct swevent_hlist *hlist;
9211 9326
9212 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); 9327 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -9282,11 +9397,9 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
9282 switch (action & ~CPU_TASKS_FROZEN) { 9397 switch (action & ~CPU_TASKS_FROZEN) {
9283 9398
9284 case CPU_UP_PREPARE: 9399 case CPU_UP_PREPARE:
9285 case CPU_DOWN_FAILED:
9286 perf_event_init_cpu(cpu); 9400 perf_event_init_cpu(cpu);
9287 break; 9401 break;
9288 9402
9289 case CPU_UP_CANCELED:
9290 case CPU_DOWN_PREPARE: 9403 case CPU_DOWN_PREPARE:
9291 perf_event_exit_cpu(cpu); 9404 perf_event_exit_cpu(cpu);
9292 break; 9405 break;
@@ -9315,9 +9428,6 @@ void __init perf_event_init(void)
9315 ret = init_hw_breakpoint(); 9428 ret = init_hw_breakpoint();
9316 WARN(ret, "hw_breakpoint initialization failed with: %d", ret); 9429 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
9317 9430
9318 /* do not patch jump label more than once per second */
9319 jump_label_rate_limit(&perf_sched_events, HZ);
9320
9321 /* 9431 /*
9322 * Build time assertion that we keep the data_head at the intended 9432 * Build time assertion that we keep the data_head at the intended
9323 * location. IOW, validation we got the __reserved[] size right. 9433 * location. IOW, validation we got the __reserved[] size right.
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 2c468dea60bc..b981a7b023f0 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(memunmap);
114 114
115static void devm_memremap_release(struct device *dev, void *res) 115static void devm_memremap_release(struct device *dev, void *res)
116{ 116{
117 memunmap(res); 117 memunmap(*(void **)res);
118} 118}
119 119
120static int devm_memremap_match(struct device *dev, void *res, void *match_data) 120static int devm_memremap_match(struct device *dev, void *res, void *match_data)
@@ -136,8 +136,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
136 if (addr) { 136 if (addr) {
137 *ptr = addr; 137 *ptr = addr;
138 devres_add(dev, ptr); 138 devres_add(dev, ptr);
139 } else 139 } else {
140 devres_free(ptr); 140 devres_free(ptr);
141 return ERR_PTR(-ENXIO);
142 }
141 143
142 return addr; 144 return addr;
143} 145}
diff --git a/kernel/module.c b/kernel/module.c
index 9537da37ce87..794ebe8e878d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -984,6 +984,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
984 mod->exit(); 984 mod->exit();
985 blocking_notifier_call_chain(&module_notify_list, 985 blocking_notifier_call_chain(&module_notify_list,
986 MODULE_STATE_GOING, mod); 986 MODULE_STATE_GOING, mod);
987 ftrace_release_mod(mod);
988
987 async_synchronize_full(); 989 async_synchronize_full();
988 990
989 /* Store the name of the last unloaded module for diagnostic purposes */ 991 /* Store the name of the last unloaded module for diagnostic purposes */
@@ -3313,6 +3315,7 @@ fail:
3313 module_put(mod); 3315 module_put(mod);
3314 blocking_notifier_call_chain(&module_notify_list, 3316 blocking_notifier_call_chain(&module_notify_list,
3315 MODULE_STATE_GOING, mod); 3317 MODULE_STATE_GOING, mod);
3318 ftrace_release_mod(mod);
3316 free_module(mod); 3319 free_module(mod);
3317 wake_up_all(&module_wq); 3320 wake_up_all(&module_wq);
3318 return ret; 3321 return ret;
@@ -3389,6 +3392,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
3389 mod->state = MODULE_STATE_COMING; 3392 mod->state = MODULE_STATE_COMING;
3390 mutex_unlock(&module_mutex); 3393 mutex_unlock(&module_mutex);
3391 3394
3395 ftrace_module_enable(mod);
3392 blocking_notifier_call_chain(&module_notify_list, 3396 blocking_notifier_call_chain(&module_notify_list,
3393 MODULE_STATE_COMING, mod); 3397 MODULE_STATE_COMING, mod);
3394 return 0; 3398 return 0;
diff --git a/kernel/resource.c b/kernel/resource.c
index 09c0597840b0..3669d1bfc425 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
1083 if (!conflict) 1083 if (!conflict)
1084 break; 1084 break;
1085 if (conflict != parent) { 1085 if (conflict != parent) {
1086 parent = conflict; 1086 if (!(conflict->flags & IORESOURCE_BUSY)) {
1087 if (!(conflict->flags & IORESOURCE_BUSY)) 1087 parent = conflict;
1088 continue; 1088 continue;
1089 }
1089 } 1090 }
1090 if (conflict->flags & flags & IORESOURCE_MUXED) { 1091 if (conflict->flags & flags & IORESOURCE_MUXED) {
1091 add_wait_queue(&muxed_resource_wait, &wait); 1092 add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cd64c979d0e1..57b939c81bce 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -420,7 +420,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
420 * entity. 420 * entity.
421 */ 421 */
422 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { 422 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
423 printk_deferred_once("sched: DL replenish lagged to much\n"); 423 printk_deferred_once("sched: DL replenish lagged too much\n");
424 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 424 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
425 dl_se->runtime = pi_se->dl_runtime; 425 dl_se->runtime = pi_se->dl_runtime;
426 } 426 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca592f977b2..57a6eea84694 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4961,7 +4961,7 @@ void ftrace_release_mod(struct module *mod)
4961 mutex_unlock(&ftrace_lock); 4961 mutex_unlock(&ftrace_lock);
4962} 4962}
4963 4963
4964static void ftrace_module_enable(struct module *mod) 4964void ftrace_module_enable(struct module *mod)
4965{ 4965{
4966 struct dyn_ftrace *rec; 4966 struct dyn_ftrace *rec;
4967 struct ftrace_page *pg; 4967 struct ftrace_page *pg;
@@ -5038,38 +5038,8 @@ void ftrace_module_init(struct module *mod)
5038 ftrace_process_locs(mod, mod->ftrace_callsites, 5038 ftrace_process_locs(mod, mod->ftrace_callsites,
5039 mod->ftrace_callsites + mod->num_ftrace_callsites); 5039 mod->ftrace_callsites + mod->num_ftrace_callsites);
5040} 5040}
5041
5042static int ftrace_module_notify(struct notifier_block *self,
5043 unsigned long val, void *data)
5044{
5045 struct module *mod = data;
5046
5047 switch (val) {
5048 case MODULE_STATE_COMING:
5049 ftrace_module_enable(mod);
5050 break;
5051 case MODULE_STATE_GOING:
5052 ftrace_release_mod(mod);
5053 break;
5054 default:
5055 break;
5056 }
5057
5058 return 0;
5059}
5060#else
5061static int ftrace_module_notify(struct notifier_block *self,
5062 unsigned long val, void *data)
5063{
5064 return 0;
5065}
5066#endif /* CONFIG_MODULES */ 5041#endif /* CONFIG_MODULES */
5067 5042
5068struct notifier_block ftrace_module_nb = {
5069 .notifier_call = ftrace_module_notify,
5070 .priority = INT_MIN, /* Run after anything that can remove kprobes */
5071};
5072
5073void __init ftrace_init(void) 5043void __init ftrace_init(void)
5074{ 5044{
5075 extern unsigned long __start_mcount_loc[]; 5045 extern unsigned long __start_mcount_loc[];
@@ -5098,10 +5068,6 @@ void __init ftrace_init(void)
5098 __start_mcount_loc, 5068 __start_mcount_loc,
5099 __stop_mcount_loc); 5069 __stop_mcount_loc);
5100 5070
5101 ret = register_module_notifier(&ftrace_module_nb);
5102 if (ret)
5103 pr_warning("Failed to register trace ftrace module exit notifier\n");
5104
5105 set_ftrace_early_filters(); 5071 set_ftrace_early_filters();
5106 5072
5107 return; 5073 return;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f333e57c4614..05ddc0820771 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
97 struct ftrace_event_field *field; 97 struct ftrace_event_field *field;
98 struct list_head *head; 98 struct list_head *head;
99 99
100 field = __find_event_field(&ftrace_generic_fields, name); 100 head = trace_get_fields(call);
101 field = __find_event_field(head, name);
101 if (field) 102 if (field)
102 return field; 103 return field;
103 104
104 field = __find_event_field(&ftrace_common_fields, name); 105 field = __find_event_field(&ftrace_generic_fields, name);
105 if (field) 106 if (field)
106 return field; 107 return field;
107 108
108 head = trace_get_fields(call); 109 return __find_event_field(&ftrace_common_fields, name);
109 return __find_event_field(head, name);
110} 110}
111 111
112static int __trace_define_field(struct list_head *head, const char *type, 112static int __trace_define_field(struct list_head *head, const char *type,
@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
171{ 171{
172 int ret; 172 int ret;
173 173
174 __generic_field(int, cpu, FILTER_OTHER); 174 __generic_field(int, CPU, FILTER_CPU);
175 __generic_field(char *, comm, FILTER_PTR_STRING); 175 __generic_field(int, cpu, FILTER_CPU);
176 __generic_field(char *, COMM, FILTER_COMM);
177 __generic_field(char *, comm, FILTER_COMM);
176 178
177 return ret; 179 return ret;
178} 180}
@@ -869,7 +871,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
869 * The ftrace subsystem is for showing formats only. 871 * The ftrace subsystem is for showing formats only.
870 * They can not be enabled or disabled via the event files. 872 * They can not be enabled or disabled via the event files.
871 */ 873 */
872 if (call->class && call->class->reg) 874 if (call->class && call->class->reg &&
875 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
873 return file; 876 return file;
874 } 877 }
875 878
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f93a219b18da..6816302542b2 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
1043 return -EINVAL; 1043 return -EINVAL;
1044 } 1044 }
1045 1045
1046 if (is_string_field(field)) { 1046 if (field->filter_type == FILTER_COMM) {
1047 filter_build_regex(pred);
1048 fn = filter_pred_comm;
1049 pred->regex.field_len = TASK_COMM_LEN;
1050 } else if (is_string_field(field)) {
1047 filter_build_regex(pred); 1051 filter_build_regex(pred);
1048 1052
1049 if (!strcmp(field->name, "comm")) { 1053 if (field->filter_type == FILTER_STATIC_STRING) {
1050 fn = filter_pred_comm;
1051 pred->regex.field_len = TASK_COMM_LEN;
1052 } else if (field->filter_type == FILTER_STATIC_STRING) {
1053 fn = filter_pred_string; 1054 fn = filter_pred_string;
1054 pred->regex.field_len = field->size; 1055 pred->regex.field_len = field->size;
1055 } else if (field->filter_type == FILTER_DYN_STRING) 1056 } else if (field->filter_type == FILTER_DYN_STRING)
@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
1072 } 1073 }
1073 pred->val = val; 1074 pred->val = val;
1074 1075
1075 if (!strcmp(field->name, "cpu")) 1076 if (field->filter_type == FILTER_CPU)
1076 fn = filter_pred_cpu; 1077 fn = filter_pred_cpu;
1077 else 1078 else
1078 fn = select_comparison_fn(pred->op, field->size, 1079 fn = select_comparison_fn(pred->op, field->size,
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 202df6cffcca..2a1abbaca10e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -156,7 +156,11 @@ check_stack(unsigned long ip, unsigned long *stack)
156 for (; p < top && i < stack_trace_max.nr_entries; p++) { 156 for (; p < top && i < stack_trace_max.nr_entries; p++) {
157 if (stack_dump_trace[i] == ULONG_MAX) 157 if (stack_dump_trace[i] == ULONG_MAX)
158 break; 158 break;
159 if (*p == stack_dump_trace[i]) { 159 /*
160 * The READ_ONCE_NOCHECK is used to let KASAN know that
161 * this is not a stack-out-of-bounds error.
162 */
163 if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
160 stack_dump_trace[x] = stack_dump_trace[i++]; 164 stack_dump_trace[x] = stack_dump_trace[i++];
161 this_size = stack_trace_index[x++] = 165 this_size = stack_trace_index[x++] =
162 (top - p) * sizeof(unsigned long); 166 (top - p) * sizeof(unsigned long);
diff --git a/mm/filemap.c b/mm/filemap.c
index 23edccecadb0..3461d97ecb30 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -446,7 +446,8 @@ int filemap_write_and_wait(struct address_space *mapping)
446{ 446{
447 int err = 0; 447 int err = 0;
448 448
449 if (mapping->nrpages) { 449 if ((!dax_mapping(mapping) && mapping->nrpages) ||
450 (dax_mapping(mapping) && mapping->nrexceptional)) {
450 err = filemap_fdatawrite(mapping); 451 err = filemap_fdatawrite(mapping);
451 /* 452 /*
452 * Even if the above returned error, the pages may be 453 * Even if the above returned error, the pages may be
@@ -482,13 +483,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
482{ 483{
483 int err = 0; 484 int err = 0;
484 485
485 if (dax_mapping(mapping) && mapping->nrexceptional) { 486 if ((!dax_mapping(mapping) && mapping->nrpages) ||
486 err = dax_writeback_mapping_range(mapping, lstart, lend); 487 (dax_mapping(mapping) && mapping->nrexceptional)) {
487 if (err)
488 return err;
489 }
490
491 if (mapping->nrpages) {
492 err = __filemap_fdatawrite_range(mapping, lstart, lend, 488 err = __filemap_fdatawrite_range(mapping, lstart, lend,
493 WB_SYNC_ALL); 489 WB_SYNC_ALL);
494 /* See comment of filemap_write_and_wait() */ 490 /* See comment of filemap_write_and_wait() */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 08fc0ba2207e..e10a4fee88d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1700,7 +1700,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1701 VM_BUG_ON(!pmd_none(*new_pmd)); 1701 VM_BUG_ON(!pmd_none(*new_pmd));
1702 1702
1703 if (pmd_move_must_withdraw(new_ptl, old_ptl)) { 1703 if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
1704 vma_is_anonymous(vma)) {
1704 pgtable_t pgtable; 1705 pgtable_t pgtable;
1705 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1706 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1706 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1707 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2835,6 +2836,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2835 pgtable_t pgtable; 2836 pgtable_t pgtable;
2836 pmd_t _pmd; 2837 pmd_t _pmd;
2837 bool young, write, dirty; 2838 bool young, write, dirty;
2839 unsigned long addr;
2838 int i; 2840 int i;
2839 2841
2840 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2842 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2860,10 +2862,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2860 young = pmd_young(*pmd); 2862 young = pmd_young(*pmd);
2861 dirty = pmd_dirty(*pmd); 2863 dirty = pmd_dirty(*pmd);
2862 2864
2865 pmdp_huge_split_prepare(vma, haddr, pmd);
2863 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2866 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2864 pmd_populate(mm, &_pmd, pgtable); 2867 pmd_populate(mm, &_pmd, pgtable);
2865 2868
2866 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2869 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2867 pte_t entry, *pte; 2870 pte_t entry, *pte;
2868 /* 2871 /*
2869 * Note that NUMA hinting access restrictions are not 2872 * Note that NUMA hinting access restrictions are not
@@ -2884,9 +2887,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2884 } 2887 }
2885 if (dirty) 2888 if (dirty)
2886 SetPageDirty(page + i); 2889 SetPageDirty(page + i);
2887 pte = pte_offset_map(&_pmd, haddr); 2890 pte = pte_offset_map(&_pmd, addr);
2888 BUG_ON(!pte_none(*pte)); 2891 BUG_ON(!pte_none(*pte));
2889 set_pte_at(mm, haddr, pte, entry); 2892 set_pte_at(mm, addr, pte, entry);
2890 atomic_inc(&page[i]._mapcount); 2893 atomic_inc(&page[i]._mapcount);
2891 pte_unmap(pte); 2894 pte_unmap(pte);
2892 } 2895 }
@@ -2936,7 +2939,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2936 pmd_populate(mm, pmd, pgtable); 2939 pmd_populate(mm, pmd, pgtable);
2937 2940
2938 if (freeze) { 2941 if (freeze) {
2939 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2942 for (i = 0; i < HPAGE_PMD_NR; i++) {
2940 page_remove_rmap(page + i, false); 2943 page_remove_rmap(page + i, false);
2941 put_page(page + i); 2944 put_page(page + i);
2942 } 2945 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06ae13e869d0..01f2b48c8618 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2630,8 +2630,10 @@ static int __init hugetlb_init(void)
2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2631 } 2631 }
2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2633 if (default_hstate_max_huge_pages) 2633 if (default_hstate_max_huge_pages) {
2634 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2634 if (!default_hstate.max_huge_pages)
2635 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2636 }
2635 2637
2636 hugetlb_init_hstates(); 2638 hugetlb_init_hstates();
2637 gather_bootmem_prealloc(); 2639 gather_bootmem_prealloc();
diff --git a/mm/memory.c b/mm/memory.c
index 38090ca37a08..906d8e3b42c0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3425,8 +3425,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3425 if (unlikely(pmd_none(*pmd)) && 3425 if (unlikely(pmd_none(*pmd)) &&
3426 unlikely(__pte_alloc(mm, vma, pmd, address))) 3426 unlikely(__pte_alloc(mm, vma, pmd, address)))
3427 return VM_FAULT_OOM; 3427 return VM_FAULT_OOM;
3428 /* if an huge pmd materialized from under us just retry later */ 3428 /*
3429 if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) 3429 * If a huge pmd materialized under us just retry later. Use
3430 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
3431 * didn't become pmd_trans_huge under us and then back to pmd_none, as
3432 * a result of MADV_DONTNEED running immediately after a huge pmd fault
3433 * in a different thread of this mm, in turn leading to a misleading
3434 * pmd_trans_huge() retval. All we have to ensure is that it is a
3435 * regular pmd that we can walk with pte_offset_map() and we can do that
3436 * through an atomic read in C, which is what pmd_trans_unstable()
3437 * provides.
3438 */
3439 if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
3430 return 0; 3440 return 0;
3431 /* 3441 /*
3432 * A regular pmd is established and it can't morph into a huge pmd 3442 * A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/migrate.c b/mm/migrate.c
index b1034f9c77e7..3ad0fea5c438 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1582,7 +1582,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1582 (GFP_HIGHUSER_MOVABLE | 1582 (GFP_HIGHUSER_MOVABLE |
1583 __GFP_THISNODE | __GFP_NOMEMALLOC | 1583 __GFP_THISNODE | __GFP_NOMEMALLOC |
1584 __GFP_NORETRY | __GFP_NOWARN) & 1584 __GFP_NORETRY | __GFP_NOWARN) &
1585 ~(__GFP_IO | __GFP_FS), 0); 1585 ~__GFP_RECLAIM, 0);
1586 1586
1587 return newpage; 1587 return newpage;
1588} 1588}
diff --git a/mm/mmap.c b/mm/mmap.c
index e2e9f48b06c2..90e3b869a8b9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2664,12 +2664,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2664 if (!vma || !(vma->vm_flags & VM_SHARED)) 2664 if (!vma || !(vma->vm_flags & VM_SHARED))
2665 goto out; 2665 goto out;
2666 2666
2667 if (start < vma->vm_start || start + size > vma->vm_end) 2667 if (start < vma->vm_start)
2668 goto out; 2668 goto out;
2669 2669
2670 if (pgoff == linear_page_index(vma, start)) { 2670 if (start + size > vma->vm_end) {
2671 ret = 0; 2671 struct vm_area_struct *next;
2672 goto out; 2672
2673 for (next = vma->vm_next; next; next = next->vm_next) {
2674 /* hole between vmas ? */
2675 if (next->vm_start != next->vm_prev->vm_end)
2676 goto out;
2677
2678 if (next->vm_file != vma->vm_file)
2679 goto out;
2680
2681 if (next->vm_flags != vma->vm_flags)
2682 goto out;
2683
2684 if (start + size <= next->vm_end)
2685 break;
2686 }
2687
2688 if (!next)
2689 goto out;
2673 } 2690 }
2674 2691
2675 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 2692 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2679 flags &= MAP_NONBLOCK; 2696 flags &= MAP_NONBLOCK;
2680 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 2697 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2681 if (vma->vm_flags & VM_LOCKED) { 2698 if (vma->vm_flags & VM_LOCKED) {
2699 struct vm_area_struct *tmp;
2682 flags |= MAP_LOCKED; 2700 flags |= MAP_LOCKED;
2701
2683 /* drop PG_Mlocked flag for over-mapped range */ 2702 /* drop PG_Mlocked flag for over-mapped range */
2684 munlock_vma_pages_range(vma, start, start + size); 2703 for (tmp = vma; tmp->vm_start >= start + size;
2704 tmp = tmp->vm_next) {
2705 munlock_vma_pages_range(tmp,
2706 max(tmp->vm_start, start),
2707 min(tmp->vm_end, start + size));
2708 }
2685 } 2709 }
2686 2710
2687 file = get_file(vma->vm_file); 2711 file = get_file(vma->vm_file);
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697a8bc4..621fbcb35a36 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2275 2275
2276 err = setup_cpu_cache(cachep, gfp); 2276 err = setup_cpu_cache(cachep, gfp);
2277 if (err) { 2277 if (err) {
2278 __kmem_cache_shutdown(cachep); 2278 __kmem_cache_release(cachep);
2279 return err; 2279 return err;
2280 } 2280 }
2281 2281
@@ -2414,12 +2414,13 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2414 2414
2415int __kmem_cache_shutdown(struct kmem_cache *cachep) 2415int __kmem_cache_shutdown(struct kmem_cache *cachep)
2416{ 2416{
2417 return __kmem_cache_shrink(cachep, false);
2418}
2419
2420void __kmem_cache_release(struct kmem_cache *cachep)
2421{
2417 int i; 2422 int i;
2418 struct kmem_cache_node *n; 2423 struct kmem_cache_node *n;
2419 int rc = __kmem_cache_shrink(cachep, false);
2420
2421 if (rc)
2422 return rc;
2423 2424
2424 free_percpu(cachep->cpu_cache); 2425 free_percpu(cachep->cpu_cache);
2425 2426
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2430 kfree(n); 2431 kfree(n);
2431 cachep->node[i] = NULL; 2432 cachep->node[i] = NULL;
2432 } 2433 }
2433 return 0;
2434} 2434}
2435 2435
2436/* 2436/*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad240c0bb..2eedacea439d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) 140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
141 141
142int __kmem_cache_shutdown(struct kmem_cache *); 142int __kmem_cache_shutdown(struct kmem_cache *);
143void __kmem_cache_release(struct kmem_cache *);
143int __kmem_cache_shrink(struct kmem_cache *, bool); 144int __kmem_cache_shrink(struct kmem_cache *, bool);
144void slab_kmem_cache_release(struct kmem_cache *); 145void slab_kmem_cache_release(struct kmem_cache *);
145 146
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef01ccf7..065b7bdabdc3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
693 693
694void slab_kmem_cache_release(struct kmem_cache *s) 694void slab_kmem_cache_release(struct kmem_cache *s)
695{ 695{
696 __kmem_cache_release(s);
696 destroy_memcg_params(s); 697 destroy_memcg_params(s);
697 kfree_const(s->name); 698 kfree_const(s->name);
698 kmem_cache_free(kmem_cache, s); 699 kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8cc7c53..5ec158054ffe 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
630 return 0; 630 return 0;
631} 631}
632 632
633void __kmem_cache_release(struct kmem_cache *c)
634{
635}
636
633int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) 637int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
634{ 638{
635 return 0; 639 return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355ac056b..d8fbd4a6ed59 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
1592 __add_partial(n, page, tail); 1592 __add_partial(n, page, tail);
1593} 1593}
1594 1594
1595static inline void
1596__remove_partial(struct kmem_cache_node *n, struct page *page)
1597{
1598 list_del(&page->lru);
1599 n->nr_partial--;
1600}
1601
1602static inline void remove_partial(struct kmem_cache_node *n, 1595static inline void remove_partial(struct kmem_cache_node *n,
1603 struct page *page) 1596 struct page *page)
1604{ 1597{
1605 lockdep_assert_held(&n->list_lock); 1598 lockdep_assert_held(&n->list_lock);
1606 __remove_partial(n, page); 1599 list_del(&page->lru);
1600 n->nr_partial--;
1607} 1601}
1608 1602
1609/* 1603/*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
3184 } 3178 }
3185} 3179}
3186 3180
3181void __kmem_cache_release(struct kmem_cache *s)
3182{
3183 free_percpu(s->cpu_slab);
3184 free_kmem_cache_nodes(s);
3185}
3186
3187static int init_kmem_cache_nodes(struct kmem_cache *s) 3187static int init_kmem_cache_nodes(struct kmem_cache *s)
3188{ 3188{
3189 int node; 3189 int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
3443 3443
3444/* 3444/*
3445 * Attempt to free all partial slabs on a node. 3445 * Attempt to free all partial slabs on a node.
3446 * This is called from kmem_cache_close(). We must be the last thread 3446 * This is called from __kmem_cache_shutdown(). We must take list_lock
3447 * using the cache and therefore we do not need to lock anymore. 3447 * because sysfs file might still access partial list after the shutdowning.
3448 */ 3448 */
3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3450{ 3450{
3451 struct page *page, *h; 3451 struct page *page, *h;
3452 3452
3453 BUG_ON(irqs_disabled());
3454 spin_lock_irq(&n->list_lock);
3453 list_for_each_entry_safe(page, h, &n->partial, lru) { 3455 list_for_each_entry_safe(page, h, &n->partial, lru) {
3454 if (!page->inuse) { 3456 if (!page->inuse) {
3455 __remove_partial(n, page); 3457 remove_partial(n, page);
3456 discard_slab(s, page); 3458 discard_slab(s, page);
3457 } else { 3459 } else {
3458 list_slab_objects(s, page, 3460 list_slab_objects(s, page,
3459 "Objects remaining in %s on kmem_cache_close()"); 3461 "Objects remaining in %s on __kmem_cache_shutdown()");
3460 } 3462 }
3461 } 3463 }
3464 spin_unlock_irq(&n->list_lock);
3462} 3465}
3463 3466
3464/* 3467/*
3465 * Release all resources used by a slab cache. 3468 * Release all resources used by a slab cache.
3466 */ 3469 */
3467static inline int kmem_cache_close(struct kmem_cache *s) 3470int __kmem_cache_shutdown(struct kmem_cache *s)
3468{ 3471{
3469 int node; 3472 int node;
3470 struct kmem_cache_node *n; 3473 struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
3476 if (n->nr_partial || slabs_node(s, node)) 3479 if (n->nr_partial || slabs_node(s, node))
3477 return 1; 3480 return 1;
3478 } 3481 }
3479 free_percpu(s->cpu_slab);
3480 free_kmem_cache_nodes(s);
3481 return 0; 3482 return 0;
3482} 3483}
3483 3484
3484int __kmem_cache_shutdown(struct kmem_cache *s)
3485{
3486 return kmem_cache_close(s);
3487}
3488
3489/******************************************************************** 3485/********************************************************************
3490 * Kmalloc subsystem 3486 * Kmalloc subsystem
3491 *******************************************************************/ 3487 *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3980 memcg_propagate_slab_attrs(s); 3976 memcg_propagate_slab_attrs(s);
3981 err = sysfs_slab_add(s); 3977 err = sysfs_slab_add(s);
3982 if (err) 3978 if (err)
3983 kmem_cache_close(s); 3979 __kmem_cache_release(s);
3984 3980
3985 return err; 3981 return err;
3986} 3982}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d5871ac493eb..f066781be3c8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1625,7 +1625,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1625 1625
1626 rt = atrtr_find(&at_hint); 1626 rt = atrtr_find(&at_hint);
1627 } 1627 }
1628 err = ENETUNREACH; 1628 err = -ENETUNREACH;
1629 if (!rt) 1629 if (!rt)
1630 goto out; 1630 goto out;
1631 1631
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e6c8382c79ba..ccf70bed0d0c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -527,11 +527,12 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
527 * gets dereferenced. 527 * gets dereferenced.
528 */ 528 */
529 spin_lock_bh(&bat_priv->gw.list_lock); 529 spin_lock_bh(&bat_priv->gw.list_lock);
530 hlist_del_init_rcu(&gw_node->list); 530 if (!hlist_unhashed(&gw_node->list)) {
531 hlist_del_init_rcu(&gw_node->list);
532 batadv_gw_node_free_ref(gw_node);
533 }
531 spin_unlock_bh(&bat_priv->gw.list_lock); 534 spin_unlock_bh(&bat_priv->gw.list_lock);
532 535
533 batadv_gw_node_free_ref(gw_node);
534
535 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 536 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
536 if (gw_node == curr_gw) 537 if (gw_node == curr_gw)
537 batadv_gw_reselect(bat_priv); 538 batadv_gw_reselect(bat_priv);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 01acccc4d218..57f7107169f5 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -76,6 +76,28 @@ out:
76} 76}
77 77
78/** 78/**
79 * batadv_mutual_parents - check if two devices are each others parent
80 * @dev1: 1st net_device
81 * @dev2: 2nd net_device
82 *
83 * veth devices come in pairs and each is the parent of the other!
84 *
85 * Return: true if the devices are each others parent, otherwise false
86 */
87static bool batadv_mutual_parents(const struct net_device *dev1,
88 const struct net_device *dev2)
89{
90 int dev1_parent_iflink = dev_get_iflink(dev1);
91 int dev2_parent_iflink = dev_get_iflink(dev2);
92
93 if (!dev1_parent_iflink || !dev2_parent_iflink)
94 return false;
95
96 return (dev1_parent_iflink == dev2->ifindex) &&
97 (dev2_parent_iflink == dev1->ifindex);
98}
99
100/**
79 * batadv_is_on_batman_iface - check if a device is a batman iface descendant 101 * batadv_is_on_batman_iface - check if a device is a batman iface descendant
80 * @net_dev: the device to check 102 * @net_dev: the device to check
81 * 103 *
@@ -108,6 +130,9 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
108 if (WARN(!parent_dev, "Cannot find parent device")) 130 if (WARN(!parent_dev, "Cannot find parent device"))
109 return false; 131 return false;
110 132
133 if (batadv_mutual_parents(net_dev, parent_dev))
134 return false;
135
111 ret = batadv_is_on_batman_iface(parent_dev); 136 ret = batadv_is_on_batman_iface(parent_dev);
112 137
113 return ret; 138 return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index cdfc85fa2743..0e80fd1461ab 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -303,9 +303,11 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
303 303
304 if (atomic_add_return(v, &vlan->tt.num_entries) == 0) { 304 if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
305 spin_lock_bh(&orig_node->vlan_list_lock); 305 spin_lock_bh(&orig_node->vlan_list_lock);
306 hlist_del_init_rcu(&vlan->list); 306 if (!hlist_unhashed(&vlan->list)) {
307 hlist_del_init_rcu(&vlan->list);
308 batadv_orig_node_vlan_free_ref(vlan);
309 }
307 spin_unlock_bh(&orig_node->vlan_list_lock); 310 spin_unlock_bh(&orig_node->vlan_list_lock);
308 batadv_orig_node_vlan_free_ref(vlan);
309 } 311 }
310 312
311 batadv_orig_node_vlan_free_ref(vlan); 313 batadv_orig_node_vlan_free_ref(vlan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 47bcef754796..883c821a9e78 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4112,8 +4112,10 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4112 break; 4112 break;
4113 } 4113 }
4114 4114
4115 *req_complete = bt_cb(skb)->hci.req_complete; 4115 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4116 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 4116 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4117 else
4118 *req_complete = bt_cb(skb)->hci.req_complete;
4117 kfree_skb(skb); 4119 kfree_skb(skb);
4118 } 4120 }
4119 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 4121 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 30e105f57f0d..74c278e00225 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -425,8 +425,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
425 mp = br_mdb_ip_get(mdb, group); 425 mp = br_mdb_ip_get(mdb, group);
426 if (!mp) { 426 if (!mp) {
427 mp = br_multicast_new_group(br, port, group); 427 mp = br_multicast_new_group(br, port, group);
428 err = PTR_ERR(mp); 428 err = PTR_ERR_OR_ZERO(mp);
429 if (IS_ERR(mp)) 429 if (err)
430 return err; 430 return err;
431 } 431 }
432 432
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 61d7617d9249..b82440e1fcb4 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -159,7 +159,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
159 tmppkt = NULL; 159 tmppkt = NULL;
160 160
161 /* Verify that length is correct */ 161 /* Verify that length is correct */
162 err = EPROTO; 162 err = -EPROTO;
163 if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1) 163 if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
164 goto out; 164 goto out;
165 } 165 }
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9cfedf565f5b..9382619a405b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1197,6 +1197,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1197 return new_piece; 1197 return new_piece;
1198} 1198}
1199 1199
1200static size_t sizeof_footer(struct ceph_connection *con)
1201{
1202 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1203 sizeof(struct ceph_msg_footer) :
1204 sizeof(struct ceph_msg_footer_old);
1205}
1206
1200static void prepare_message_data(struct ceph_msg *msg, u32 data_len) 1207static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1201{ 1208{
1202 BUG_ON(!msg); 1209 BUG_ON(!msg);
@@ -2335,9 +2342,9 @@ static int read_partial_message(struct ceph_connection *con)
2335 ceph_pr_addr(&con->peer_addr.in_addr), 2342 ceph_pr_addr(&con->peer_addr.in_addr),
2336 seq, con->in_seq + 1); 2343 seq, con->in_seq + 1);
2337 con->in_base_pos = -front_len - middle_len - data_len - 2344 con->in_base_pos = -front_len - middle_len - data_len -
2338 sizeof(m->footer); 2345 sizeof_footer(con);
2339 con->in_tag = CEPH_MSGR_TAG_READY; 2346 con->in_tag = CEPH_MSGR_TAG_READY;
2340 return 0; 2347 return 1;
2341 } else if ((s64)seq - (s64)con->in_seq > 1) { 2348 } else if ((s64)seq - (s64)con->in_seq > 1) {
2342 pr_err("read_partial_message bad seq %lld expected %lld\n", 2349 pr_err("read_partial_message bad seq %lld expected %lld\n",
2343 seq, con->in_seq + 1); 2350 seq, con->in_seq + 1);
@@ -2360,10 +2367,10 @@ static int read_partial_message(struct ceph_connection *con)
2360 /* skip this message */ 2367 /* skip this message */
2361 dout("alloc_msg said skip message\n"); 2368 dout("alloc_msg said skip message\n");
2362 con->in_base_pos = -front_len - middle_len - data_len - 2369 con->in_base_pos = -front_len - middle_len - data_len -
2363 sizeof(m->footer); 2370 sizeof_footer(con);
2364 con->in_tag = CEPH_MSGR_TAG_READY; 2371 con->in_tag = CEPH_MSGR_TAG_READY;
2365 con->in_seq++; 2372 con->in_seq++;
2366 return 0; 2373 return 1;
2367 } 2374 }
2368 2375
2369 BUG_ON(!con->in_msg); 2376 BUG_ON(!con->in_msg);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3534e12683d3..5bc053778fed 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2853,8 +2853,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2853 mutex_lock(&osdc->request_mutex); 2853 mutex_lock(&osdc->request_mutex);
2854 req = __lookup_request(osdc, tid); 2854 req = __lookup_request(osdc, tid);
2855 if (!req) { 2855 if (!req) {
2856 pr_warn("%s osd%d tid %llu unknown, skipping\n", 2856 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
2857 __func__, osd->o_osd, tid); 2857 osd->o_osd, tid);
2858 m = NULL; 2858 m = NULL;
2859 *skip = 1; 2859 *skip = 1;
2860 goto out; 2860 goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8cba3d852f25..0ef061b2badc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5379,12 +5379,12 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5379{ 5379{
5380 struct netdev_adjacent *lower; 5380 struct netdev_adjacent *lower;
5381 5381
5382 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 5382 lower = list_entry(*iter, struct netdev_adjacent, list);
5383 5383
5384 if (&lower->list == &dev->adj_list.lower) 5384 if (&lower->list == &dev->adj_list.lower)
5385 return NULL; 5385 return NULL;
5386 5386
5387 *iter = &lower->list; 5387 *iter = lower->list.next;
5388 5388
5389 return lower->dev; 5389 return lower->dev;
5390} 5390}
@@ -7422,8 +7422,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7422 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 7422 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
7423 setup(dev); 7423 setup(dev);
7424 7424
7425 if (!dev->tx_queue_len) 7425 if (!dev->tx_queue_len) {
7426 dev->priv_flags |= IFF_NO_QUEUE; 7426 dev->priv_flags |= IFF_NO_QUEUE;
7427 dev->tx_queue_len = 1;
7428 }
7427 7429
7428 dev->num_tx_queues = txqs; 7430 dev->num_tx_queues = txqs;
7429 dev->real_num_tx_queues = txqs; 7431 dev->real_num_tx_queues = txqs;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index eab81bc80e5c..12e700332010 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -399,6 +399,13 @@ ip_proto_again:
399 goto out_bad; 399 goto out_bad;
400 proto = eth->h_proto; 400 proto = eth->h_proto;
401 nhoff += sizeof(*eth); 401 nhoff += sizeof(*eth);
402
403 /* Cap headers that we access via pointers at the
404 * end of the Ethernet header as our maximum alignment
405 * at that point is only 2 bytes.
406 */
407 if (NET_IP_ALIGN)
408 hlen = nhoff;
402 } 409 }
403 410
404 key_control->flags |= FLOW_DIS_ENCAPSULATION; 411 key_control->flags |= FLOW_DIS_ENCAPSULATION;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5684e14932bd..902d606324a0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -824,26 +824,26 @@ lookup:
824 824
825 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 825 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
826 struct request_sock *req = inet_reqsk(sk); 826 struct request_sock *req = inet_reqsk(sk);
827 struct sock *nsk = NULL; 827 struct sock *nsk;
828 828
829 sk = req->rsk_listener; 829 sk = req->rsk_listener;
830 if (likely(sk->sk_state == DCCP_LISTEN)) { 830 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
831 nsk = dccp_check_req(sk, skb, req);
832 } else {
833 inet_csk_reqsk_queue_drop_and_put(sk, req); 831 inet_csk_reqsk_queue_drop_and_put(sk, req);
834 goto lookup; 832 goto lookup;
835 } 833 }
834 sock_hold(sk);
835 nsk = dccp_check_req(sk, skb, req);
836 if (!nsk) { 836 if (!nsk) {
837 reqsk_put(req); 837 reqsk_put(req);
838 goto discard_it; 838 goto discard_and_relse;
839 } 839 }
840 if (nsk == sk) { 840 if (nsk == sk) {
841 sock_hold(sk);
842 reqsk_put(req); 841 reqsk_put(req);
843 } else if (dccp_child_process(sk, nsk, skb)) { 842 } else if (dccp_child_process(sk, nsk, skb)) {
844 dccp_v4_ctl_send_reset(sk, skb); 843 dccp_v4_ctl_send_reset(sk, skb);
845 goto discard_it; 844 goto discard_and_relse;
846 } else { 845 } else {
846 sock_put(sk);
847 return 0; 847 return 0;
848 } 848 }
849 } 849 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c6d0508e63a..b8608b71a66d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -691,26 +691,26 @@ lookup:
691 691
692 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 692 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
693 struct request_sock *req = inet_reqsk(sk); 693 struct request_sock *req = inet_reqsk(sk);
694 struct sock *nsk = NULL; 694 struct sock *nsk;
695 695
696 sk = req->rsk_listener; 696 sk = req->rsk_listener;
697 if (likely(sk->sk_state == DCCP_LISTEN)) { 697 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
698 nsk = dccp_check_req(sk, skb, req);
699 } else {
700 inet_csk_reqsk_queue_drop_and_put(sk, req); 698 inet_csk_reqsk_queue_drop_and_put(sk, req);
701 goto lookup; 699 goto lookup;
702 } 700 }
701 sock_hold(sk);
702 nsk = dccp_check_req(sk, skb, req);
703 if (!nsk) { 703 if (!nsk) {
704 reqsk_put(req); 704 reqsk_put(req);
705 goto discard_it; 705 goto discard_and_relse;
706 } 706 }
707 if (nsk == sk) { 707 if (nsk == sk) {
708 sock_hold(sk);
709 reqsk_put(req); 708 reqsk_put(req);
710 } else if (dccp_child_process(sk, nsk, skb)) { 709 } else if (dccp_child_process(sk, nsk, skb)) {
711 dccp_v6_ctl_send_reset(sk, skb); 710 dccp_v6_ctl_send_reset(sk, skb);
712 goto discard_it; 711 goto discard_and_relse;
713 } else { 712 } else {
713 sock_put(sk);
714 return 0; 714 return 0;
715 } 715 }
716 } 716 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 40b9ca72aae3..ab24521beb4d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1194,7 +1194,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1194 if (ret) { 1194 if (ret) {
1195 netdev_err(master, "error %d registering interface %s\n", 1195 netdev_err(master, "error %d registering interface %s\n",
1196 ret, slave_dev->name); 1196 ret, slave_dev->name);
1197 phy_disconnect(p->phy);
1198 ds->ports[port] = NULL; 1197 ds->ports[port] = NULL;
1199 free_netdev(slave_dev); 1198 free_netdev(slave_dev);
1200 return ret; 1199 return ret;
@@ -1205,6 +1204,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1205 ret = dsa_slave_phy_setup(p, slave_dev); 1204 ret = dsa_slave_phy_setup(p, slave_dev);
1206 if (ret) { 1205 if (ret) {
1207 netdev_err(master, "error %d setting up slave phy\n", ret); 1206 netdev_err(master, "error %d setting up slave phy\n", ret);
1207 unregister_netdev(slave_dev);
1208 free_netdev(slave_dev); 1208 free_netdev(slave_dev);
1209 return ret; 1209 return ret;
1210 } 1210 }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d31e65a..f6303b17546b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1847 if (err < 0) 1847 if (err < 0)
1848 goto errout; 1848 goto errout;
1849 1849
1850 err = EINVAL; 1850 err = -EINVAL;
1851 if (!tb[NETCONFA_IFINDEX]) 1851 if (!tb[NETCONFA_IFINDEX])
1852 goto errout; 1852 goto errout;
1853 1853
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 46b9c887bede..64148914803a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
789 reqsk_put(req); 789 reqsk_put(req);
790} 790}
791 791
792void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, 792struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
793 struct sock *child) 793 struct request_sock *req,
794 struct sock *child)
794{ 795{
795 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 796 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
796 797
797 spin_lock(&queue->rskq_lock); 798 spin_lock(&queue->rskq_lock);
798 if (unlikely(sk->sk_state != TCP_LISTEN)) { 799 if (unlikely(sk->sk_state != TCP_LISTEN)) {
799 inet_child_forget(sk, req, child); 800 inet_child_forget(sk, req, child);
801 child = NULL;
800 } else { 802 } else {
801 req->sk = child; 803 req->sk = child;
802 req->dl_next = NULL; 804 req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
808 sk_acceptq_added(sk); 810 sk_acceptq_added(sk);
809 } 811 }
810 spin_unlock(&queue->rskq_lock); 812 spin_unlock(&queue->rskq_lock);
813 return child;
811} 814}
812EXPORT_SYMBOL(inet_csk_reqsk_queue_add); 815EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
813 816
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
817 if (own_req) { 820 if (own_req) {
818 inet_csk_reqsk_queue_drop(sk, req); 821 inet_csk_reqsk_queue_drop(sk, req);
819 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); 822 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
820 inet_csk_reqsk_queue_add(sk, req, child); 823 if (inet_csk_reqsk_queue_add(sk, req, child))
821 /* Warning: caller must not call reqsk_put(req); 824 return child;
822 * child stole last reference on it.
823 */
824 return child;
825 } 825 }
826 /* Too bad, another child took ownership of the request, undo. */ 826 /* Too bad, another child took ownership of the request, undo. */
827 bh_unlock_sock(child); 827 bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 56fdf4e0dce4..41ba68de46d8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1054,8 +1054,9 @@ static const struct net_device_ops gre_tap_netdev_ops = {
1054static void ipgre_tap_setup(struct net_device *dev) 1054static void ipgre_tap_setup(struct net_device *dev)
1055{ 1055{
1056 ether_setup(dev); 1056 ether_setup(dev);
1057 dev->netdev_ops = &gre_tap_netdev_ops; 1057 dev->netdev_ops = &gre_tap_netdev_ops;
1058 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1058 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1059 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1059 ip_tunnel_setup(dev, gre_tap_net_id); 1060 ip_tunnel_setup(dev, gre_tap_net_id);
1060} 1061}
1061 1062
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c03e27..a50124260f5a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
249 switch (cmsg->cmsg_type) { 249 switch (cmsg->cmsg_type) {
250 case IP_RETOPTS: 250 case IP_RETOPTS:
251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
252
253 /* Our caller is responsible for freeing ipc->opt */
252 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), 254 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
253 err < 40 ? err : 40); 255 err < 40 ? err : 40);
254 if (err) 256 if (err)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c117b21b937d..d3a27165f9cc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
746 746
747 if (msg->msg_controllen) { 747 if (msg->msg_controllen) {
748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); 748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
749 if (err) 749 if (unlikely(err)) {
750 kfree(ipc.opt);
750 return err; 751 return err;
752 }
751 if (ipc.opt) 753 if (ipc.opt)
752 free = 1; 754 free = 1;
753 } 755 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bc35f1842512..7113bae4e6a0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
547 547
548 if (msg->msg_controllen) { 548 if (msg->msg_controllen) {
549 err = ip_cmsg_send(net, msg, &ipc, false); 549 err = ip_cmsg_send(net, msg, &ipc, false);
550 if (err) 550 if (unlikely(err)) {
551 kfree(ipc.opt);
551 goto out; 552 goto out;
553 }
552 if (ipc.opt) 554 if (ipc.opt)
553 free = 1; 555 free = 1;
554 } 556 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85f184e429c6..02c62299d717 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130static int ip_rt_min_advmss __read_mostly = 256; 130static int ip_rt_min_advmss __read_mostly = 256;
131 131
132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
132/* 133/*
133 * Interface to generic destination cache. 134 * Interface to generic destination cache.
134 */ 135 */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
755 struct fib_nh *nh = &FIB_RES_NH(res); 756 struct fib_nh *nh = &FIB_RES_NH(res);
756 757
757 update_or_create_fnhe(nh, fl4->daddr, new_gw, 758 update_or_create_fnhe(nh, fl4->daddr, new_gw,
758 0, 0); 759 0, jiffies + ip_rt_gc_timeout);
759 } 760 }
760 if (kill_route) 761 if (kill_route)
761 rt->dst.obsolete = DST_OBSOLETE_KILL; 762 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
1556#endif 1557#endif
1557} 1558}
1558 1559
1560static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1561{
1562 struct fnhe_hash_bucket *hash;
1563 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1564 u32 hval = fnhe_hashfun(daddr);
1565
1566 spin_lock_bh(&fnhe_lock);
1567
1568 hash = rcu_dereference_protected(nh->nh_exceptions,
1569 lockdep_is_held(&fnhe_lock));
1570 hash += hval;
1571
1572 fnhe_p = &hash->chain;
1573 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1574 while (fnhe) {
1575 if (fnhe->fnhe_daddr == daddr) {
1576 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1577 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1578 fnhe_flush_routes(fnhe);
1579 kfree_rcu(fnhe, rcu);
1580 break;
1581 }
1582 fnhe_p = &fnhe->fnhe_next;
1583 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1584 lockdep_is_held(&fnhe_lock));
1585 }
1586
1587 spin_unlock_bh(&fnhe_lock);
1588}
1589
1559/* called in rcu_read_lock() section */ 1590/* called in rcu_read_lock() section */
1560static int __mkroute_input(struct sk_buff *skb, 1591static int __mkroute_input(struct sk_buff *skb,
1561 const struct fib_result *res, 1592 const struct fib_result *res,
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
1609 1640
1610 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1641 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1611 if (do_cache) { 1642 if (do_cache) {
1612 if (fnhe) 1643 if (fnhe) {
1613 rth = rcu_dereference(fnhe->fnhe_rth_input); 1644 rth = rcu_dereference(fnhe->fnhe_rth_input);
1614 else 1645 if (rth && rth->dst.expires &&
1615 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1646 time_after(jiffies, rth->dst.expires)) {
1647 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1648 fnhe = NULL;
1649 } else {
1650 goto rt_cache;
1651 }
1652 }
1653
1654 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1616 1655
1656rt_cache:
1617 if (rt_cache_valid(rth)) { 1657 if (rt_cache_valid(rth)) {
1618 skb_dst_set_noref(skb, &rth->dst); 1658 skb_dst_set_noref(skb, &rth->dst);
1619 goto out; 1659 goto out;
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2014 struct fib_nh *nh = &FIB_RES_NH(*res); 2054 struct fib_nh *nh = &FIB_RES_NH(*res);
2015 2055
2016 fnhe = find_exception(nh, fl4->daddr); 2056 fnhe = find_exception(nh, fl4->daddr);
2017 if (fnhe) 2057 if (fnhe) {
2018 prth = &fnhe->fnhe_rth_output; 2058 prth = &fnhe->fnhe_rth_output;
2019 else { 2059 rth = rcu_dereference(*prth);
2020 if (unlikely(fl4->flowi4_flags & 2060 if (rth && rth->dst.expires &&
2021 FLOWI_FLAG_KNOWN_NH && 2061 time_after(jiffies, rth->dst.expires)) {
2022 !(nh->nh_gw && 2062 ip_del_fnhe(nh, fl4->daddr);
2023 nh->nh_scope == RT_SCOPE_LINK))) { 2063 fnhe = NULL;
2024 do_cache = false; 2064 } else {
2025 goto add; 2065 goto rt_cache;
2026 } 2066 }
2027 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2028 } 2067 }
2068
2069 if (unlikely(fl4->flowi4_flags &
2070 FLOWI_FLAG_KNOWN_NH &&
2071 !(nh->nh_gw &&
2072 nh->nh_scope == RT_SCOPE_LINK))) {
2073 do_cache = false;
2074 goto add;
2075 }
2076 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2029 rth = rcu_dereference(*prth); 2077 rth = rcu_dereference(*prth);
2078
2079rt_cache:
2030 if (rt_cache_valid(rth)) { 2080 if (rt_cache_valid(rth)) {
2031 dst_hold(&rth->dst); 2081 dst_hold(&rth->dst);
2032 return rth; 2082 return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
2569} 2619}
2570 2620
2571#ifdef CONFIG_SYSCTL 2621#ifdef CONFIG_SYSCTL
2572static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2573static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2622static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2574static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2623static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2575static int ip_rt_gc_elasticity __read_mostly = 8; 2624static int ip_rt_gc_elasticity __read_mostly = 8;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0c36ef4a3f86..483ffdf5aa4d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2950,7 +2950,7 @@ static void __tcp_alloc_md5sig_pool(void)
2950 struct crypto_hash *hash; 2950 struct crypto_hash *hash;
2951 2951
2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2953 if (IS_ERR_OR_NULL(hash)) 2953 if (IS_ERR(hash))
2954 return; 2954 return;
2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; 2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
2956 } 2956 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1c2a73406261..3b2c8e90a475 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2896,7 +2896,10 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
2896{ 2896{
2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; 2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min; 2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min;
2899 struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now }; 2899 struct rtt_meas rttm = {
2900 .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
2901 .ts = now,
2902 };
2900 u32 elapsed; 2903 u32 elapsed;
2901 2904
2902 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ 2905 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f6ff037adaf..487ac67059e2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1597,28 +1597,30 @@ process:
1597 1597
1598 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1598 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1599 struct request_sock *req = inet_reqsk(sk); 1599 struct request_sock *req = inet_reqsk(sk);
1600 struct sock *nsk = NULL; 1600 struct sock *nsk;
1601 1601
1602 sk = req->rsk_listener; 1602 sk = req->rsk_listener;
1603 if (tcp_v4_inbound_md5_hash(sk, skb)) 1603 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1604 goto discard_and_relse; 1604 reqsk_put(req);
1605 if (likely(sk->sk_state == TCP_LISTEN)) { 1605 goto discard_it;
1606 nsk = tcp_check_req(sk, skb, req, false); 1606 }
1607 } else { 1607 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1608 inet_csk_reqsk_queue_drop_and_put(sk, req); 1608 inet_csk_reqsk_queue_drop_and_put(sk, req);
1609 goto lookup; 1609 goto lookup;
1610 } 1610 }
1611 sock_hold(sk);
1612 nsk = tcp_check_req(sk, skb, req, false);
1611 if (!nsk) { 1613 if (!nsk) {
1612 reqsk_put(req); 1614 reqsk_put(req);
1613 goto discard_it; 1615 goto discard_and_relse;
1614 } 1616 }
1615 if (nsk == sk) { 1617 if (nsk == sk) {
1616 sock_hold(sk);
1617 reqsk_put(req); 1618 reqsk_put(req);
1618 } else if (tcp_child_process(sk, nsk, skb)) { 1619 } else if (tcp_child_process(sk, nsk, skb)) {
1619 tcp_v4_send_reset(nsk, skb); 1620 tcp_v4_send_reset(nsk, skb);
1620 goto discard_it; 1621 goto discard_and_relse;
1621 } else { 1622 } else {
1623 sock_put(sk);
1622 return 0; 1624 return 0;
1623 } 1625 }
1624 } 1626 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index be0b21852b13..95d2f198017e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1048,8 +1048,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1048 if (msg->msg_controllen) { 1048 if (msg->msg_controllen) {
1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc, 1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc,
1050 sk->sk_family == AF_INET6); 1050 sk->sk_family == AF_INET6);
1051 if (err) 1051 if (unlikely(err)) {
1052 kfree(ipc.opt);
1052 return err; 1053 return err;
1054 }
1053 if (ipc.opt) 1055 if (ipc.opt)
1054 free = 1; 1056 free = 1;
1055 connected = 0; 1057 connected = 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9efd9ffdc34c..bdd7eac4307a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
583 if (err < 0) 583 if (err < 0)
584 goto errout; 584 goto errout;
585 585
586 err = EINVAL; 586 err = -EINVAL;
587 if (!tb[NETCONFA_IFINDEX]) 587 if (!tb[NETCONFA_IFINDEX])
588 goto errout; 588 goto errout;
589 589
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f37f18b6b40c..a69aad1e29d1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1512,6 +1512,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
1512 dev->destructor = ip6gre_dev_free; 1512 dev->destructor = ip6gre_dev_free;
1513 1513
1514 dev->features |= NETIF_F_NETNS_LOCAL; 1514 dev->features |= NETIF_F_NETNS_LOCAL;
1515 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1515} 1516}
1516 1517
1517static int ip6gre_newlink(struct net *src_net, struct net_device *dev, 1518static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 31ba7ca19757..051b6a6bfff6 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -21,6 +21,10 @@
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/netfilter/ipv6/nf_nat_masquerade.h> 22#include <net/netfilter/ipv6/nf_nat_masquerade.h>
23 23
24#define MAX_WORK_COUNT 16
25
26static atomic_t v6_worker_count;
27
24unsigned int 28unsigned int
25nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 29nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
26 const struct net_device *out) 30 const struct net_device *out)
@@ -78,14 +82,78 @@ static struct notifier_block masq_dev_notifier = {
78 .notifier_call = masq_device_event, 82 .notifier_call = masq_device_event,
79}; 83};
80 84
85struct masq_dev_work {
86 struct work_struct work;
87 struct net *net;
88 int ifindex;
89};
90
91static void iterate_cleanup_work(struct work_struct *work)
92{
93 struct masq_dev_work *w;
94 long index;
95
96 w = container_of(work, struct masq_dev_work, work);
97
98 index = w->ifindex;
99 nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
100
101 put_net(w->net);
102 kfree(w);
103 atomic_dec(&v6_worker_count);
104 module_put(THIS_MODULE);
105}
106
107/* ipv6 inet notifier is an atomic notifier, i.e. we cannot
108 * schedule.
109 *
110 * Unfortunately, nf_ct_iterate_cleanup can run for a long
111 * time if there are lots of conntracks and the system
112 * handles high softirq load, so it frequently calls cond_resched
113 * while iterating the conntrack table.
114 *
115 * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
116 *
117 * As we can have 'a lot' of inet_events (depending on amount
118 * of ipv6 addresses being deleted), we also need to add an upper
119 * limit to the number of queued work items.
120 */
81static int masq_inet_event(struct notifier_block *this, 121static int masq_inet_event(struct notifier_block *this,
82 unsigned long event, void *ptr) 122 unsigned long event, void *ptr)
83{ 123{
84 struct inet6_ifaddr *ifa = ptr; 124 struct inet6_ifaddr *ifa = ptr;
85 struct netdev_notifier_info info; 125 const struct net_device *dev;
126 struct masq_dev_work *w;
127 struct net *net;
128
129 if (event != NETDEV_DOWN ||
130 atomic_read(&v6_worker_count) >= MAX_WORK_COUNT)
131 return NOTIFY_DONE;
132
133 dev = ifa->idev->dev;
134 net = maybe_get_net(dev_net(dev));
135 if (!net)
136 return NOTIFY_DONE;
86 137
87 netdev_notifier_info_init(&info, ifa->idev->dev); 138 if (!try_module_get(THIS_MODULE))
88 return masq_device_event(this, event, &info); 139 goto err_module;
140
141 w = kmalloc(sizeof(*w), GFP_ATOMIC);
142 if (w) {
143 atomic_inc(&v6_worker_count);
144
145 INIT_WORK(&w->work, iterate_cleanup_work);
146 w->ifindex = dev->ifindex;
147 w->net = net;
148 schedule_work(&w->work);
149
150 return NOTIFY_DONE;
151 }
152
153 module_put(THIS_MODULE);
154 err_module:
155 put_net(net);
156 return NOTIFY_DONE;
89} 157}
90 158
91static struct notifier_block masq_inet_notifier = { 159static struct notifier_block masq_inet_notifier = {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1a5a70fb8551..5c8c84273028 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,7 +1387,7 @@ process:
1387 1387
1388 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1388 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1389 struct request_sock *req = inet_reqsk(sk); 1389 struct request_sock *req = inet_reqsk(sk);
1390 struct sock *nsk = NULL; 1390 struct sock *nsk;
1391 1391
1392 sk = req->rsk_listener; 1392 sk = req->rsk_listener;
1393 tcp_v6_fill_cb(skb, hdr, th); 1393 tcp_v6_fill_cb(skb, hdr, th);
@@ -1395,24 +1395,24 @@ process:
1395 reqsk_put(req); 1395 reqsk_put(req);
1396 goto discard_it; 1396 goto discard_it;
1397 } 1397 }
1398 if (likely(sk->sk_state == TCP_LISTEN)) { 1398 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1399 nsk = tcp_check_req(sk, skb, req, false);
1400 } else {
1401 inet_csk_reqsk_queue_drop_and_put(sk, req); 1399 inet_csk_reqsk_queue_drop_and_put(sk, req);
1402 goto lookup; 1400 goto lookup;
1403 } 1401 }
1402 sock_hold(sk);
1403 nsk = tcp_check_req(sk, skb, req, false);
1404 if (!nsk) { 1404 if (!nsk) {
1405 reqsk_put(req); 1405 reqsk_put(req);
1406 goto discard_it; 1406 goto discard_and_relse;
1407 } 1407 }
1408 if (nsk == sk) { 1408 if (nsk == sk) {
1409 sock_hold(sk);
1410 reqsk_put(req); 1409 reqsk_put(req);
1411 tcp_v6_restore_cb(skb); 1410 tcp_v6_restore_cb(skb);
1412 } else if (tcp_child_process(sk, nsk, skb)) { 1411 } else if (tcp_child_process(sk, nsk, skb)) {
1413 tcp_v6_send_reset(nsk, skb); 1412 tcp_v6_send_reset(nsk, skb);
1414 goto discard_it; 1413 goto discard_and_relse;
1415 } else { 1414 } else {
1415 sock_put(sk);
1416 return 0; 1416 return 0;
1417 } 1417 }
1418 } 1418 }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f93c5be612a7..2caaa84ce92d 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, 124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
125 NLM_F_ACK, tunnel, cmd); 125 NLM_F_ACK, tunnel, cmd);
126 126
127 if (ret >= 0) 127 if (ret >= 0) {
128 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 128 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
129 /* We don't care if no one is listening */
130 if (ret == -ESRCH)
131 ret = 0;
132 return ret;
133 }
129 134
130 nlmsg_free(msg); 135 nlmsg_free(msg);
131 136
@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
147 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 152 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
148 NLM_F_ACK, session, cmd); 153 NLM_F_ACK, session, cmd);
149 154
150 if (ret >= 0) 155 if (ret >= 0) {
151 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 156 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
157 /* We don't care if no one is listening */
158 if (ret == -ESRCH)
159 ret = 0;
160 return ret;
161 }
152 162
153 nlmsg_free(msg); 163 nlmsg_free(msg);
154 164
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8c067e6663a1..95e757c377f9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -891,7 +891,7 @@ config NETFILTER_XT_TARGET_TEE
891 depends on IPV6 || IPV6=n 891 depends on IPV6 || IPV6=n
892 depends on !NF_CONNTRACK || NF_CONNTRACK 892 depends on !NF_CONNTRACK || NF_CONNTRACK
893 select NF_DUP_IPV4 893 select NF_DUP_IPV4
894 select NF_DUP_IPV6 if IP6_NF_IPTABLES != n 894 select NF_DUP_IPV6 if IPV6
895 ---help--- 895 ---help---
896 This option adds a "TEE" target with which a packet can be cloned and 896 This option adds a "TEE" target with which a packet can be cloned and
897 this clone be rerouted to another nexthop. 897 this clone be rerouted to another nexthop.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 58882de06bd7..f60b4fdeeb8c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1412,6 +1412,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1412 } 1412 }
1413 spin_unlock(lockp); 1413 spin_unlock(lockp);
1414 local_bh_enable(); 1414 local_bh_enable();
1415 cond_resched();
1415 } 1416 }
1416 1417
1417 for_each_possible_cpu(cpu) { 1418 for_each_possible_cpu(cpu) {
@@ -1424,6 +1425,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1424 set_bit(IPS_DYING_BIT, &ct->status); 1425 set_bit(IPS_DYING_BIT, &ct->status);
1425 } 1426 }
1426 spin_unlock_bh(&pcpu->lock); 1427 spin_unlock_bh(&pcpu->lock);
1428 cond_resched();
1427 } 1429 }
1428 return NULL; 1430 return NULL;
1429found: 1431found:
@@ -1440,6 +1442,8 @@ void nf_ct_iterate_cleanup(struct net *net,
1440 struct nf_conn *ct; 1442 struct nf_conn *ct;
1441 unsigned int bucket = 0; 1443 unsigned int bucket = 0;
1442 1444
1445 might_sleep();
1446
1443 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { 1447 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1444 /* Time to push up daises... */ 1448 /* Time to push up daises... */
1445 if (del_timer(&ct->timeout)) 1449 if (del_timer(&ct->timeout))
@@ -1448,6 +1452,7 @@ void nf_ct_iterate_cleanup(struct net *net,
1448 /* ... else the timer will get him soon. */ 1452 /* ... else the timer will get him soon. */
1449 1453
1450 nf_ct_put(ct); 1454 nf_ct_put(ct);
1455 cond_resched();
1451 } 1456 }
1452} 1457}
1453EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); 1458EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index a7ba23353dab..857ae89633af 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -311,14 +311,14 @@ replay:
311#endif 311#endif
312 { 312 {
313 nfnl_unlock(subsys_id); 313 nfnl_unlock(subsys_id);
314 netlink_ack(skb, nlh, -EOPNOTSUPP); 314 netlink_ack(oskb, nlh, -EOPNOTSUPP);
315 return kfree_skb(skb); 315 return kfree_skb(skb);
316 } 316 }
317 } 317 }
318 318
319 if (!ss->commit || !ss->abort) { 319 if (!ss->commit || !ss->abort) {
320 nfnl_unlock(subsys_id); 320 nfnl_unlock(subsys_id);
321 netlink_ack(skb, nlh, -EOPNOTSUPP); 321 netlink_ack(oskb, nlh, -EOPNOTSUPP);
322 return kfree_skb(skb); 322 return kfree_skb(skb);
323 } 323 }
324 324
@@ -328,10 +328,12 @@ replay:
328 nlh = nlmsg_hdr(skb); 328 nlh = nlmsg_hdr(skb);
329 err = 0; 329 err = 0;
330 330
331 if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || 331 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
332 skb->len < nlh->nlmsg_len) { 332 skb->len < nlh->nlmsg_len ||
333 err = -EINVAL; 333 nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
334 goto ack; 334 nfnl_err_reset(&err_list);
335 status |= NFNL_BATCH_FAILURE;
336 goto done;
335 } 337 }
336 338
337 /* Only requests are handled by the kernel */ 339 /* Only requests are handled by the kernel */
@@ -406,7 +408,7 @@ ack:
406 * pointing to the batch header. 408 * pointing to the batch header.
407 */ 409 */
408 nfnl_err_reset(&err_list); 410 nfnl_err_reset(&err_list);
409 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 411 netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
410 status |= NFNL_BATCH_FAILURE; 412 status |= NFNL_BATCH_FAILURE;
411 goto done; 413 goto done;
412 } 414 }
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 94837d236ab0..2671b9deb103 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -312,7 +312,7 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
313 untimeout(h, timeout); 313 untimeout(h, timeout);
314 } 314 }
315 nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); 315 spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
316 } 316 }
317 local_bh_enable(); 317 local_bh_enable();
318} 318}
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c7808fc19719..c9743f78f219 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -100,7 +100,7 @@ static int nft_counter_init(const struct nft_ctx *ctx,
100 100
101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu); 101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
102 if (cpu_stats == NULL) 102 if (cpu_stats == NULL)
103 return ENOMEM; 103 return -ENOMEM;
104 104
105 preempt_disable(); 105 preempt_disable();
106 this_cpu = this_cpu_ptr(cpu_stats); 106 this_cpu = this_cpu_ptr(cpu_stats);
@@ -138,7 +138,7 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu, 138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC); 139 GFP_ATOMIC);
140 if (cpu_stats == NULL) 140 if (cpu_stats == NULL)
141 return ENOMEM; 141 return -ENOMEM;
142 142
143 preempt_disable(); 143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats); 144 this_cpu = this_cpu_ptr(cpu_stats);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3eff7b67cdf2..6e57a3966dc5 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -38,7 +38,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
38 return XT_CONTINUE; 38 return XT_CONTINUE;
39} 39}
40 40
41#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 41#if IS_ENABLED(CONFIG_IPV6)
42static unsigned int 42static unsigned int
43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) 43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
44{ 44{
@@ -131,7 +131,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
131 .destroy = tee_tg_destroy, 131 .destroy = tee_tg_destroy,
132 .me = THIS_MODULE, 132 .me = THIS_MODULE,
133 }, 133 },
134#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 134#if IS_ENABLED(CONFIG_IPV6)
135 { 135 {
136 .name = "TEE", 136 .name = "TEE",
137 .revision = 1, 137 .revision = 1,
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index de9cb19efb6a..5eb7694348b5 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
90 int err; 90 int err;
91 struct vxlan_config conf = { 91 struct vxlan_config conf = {
92 .no_share = true, 92 .no_share = true,
93 .flags = VXLAN_F_COLLECT_METADATA, 93 .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
94 /* Don't restrict the packets that can be sent by MTU */ 94 /* Don't restrict the packets that can be sent by MTU */
95 .mtu = IP_MAX_MTU, 95 .mtu = IP_MAX_MTU,
96 }; 96 };
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b5c2cf2aa6d4..af1acf009866 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1852,6 +1852,7 @@ reset:
1852 } 1852 }
1853 1853
1854 tp = old_tp; 1854 tp = old_tp;
1855 protocol = tc_skb_protocol(skb);
1855 goto reclassify; 1856 goto reclassify;
1856#endif 1857#endif
1857} 1858}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ab0d538a74ed..1099e99a53c4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -60,6 +60,8 @@
60#include <net/inet_common.h> 60#include <net/inet_common.h>
61#include <net/inet_ecn.h> 61#include <net/inet_ecn.h>
62 62
63#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
64
63/* Global data structures. */ 65/* Global data structures. */
64struct sctp_globals sctp_globals __read_mostly; 66struct sctp_globals sctp_globals __read_mostly;
65 67
@@ -1355,6 +1357,8 @@ static __init int sctp_init(void)
1355 unsigned long limit; 1357 unsigned long limit;
1356 int max_share; 1358 int max_share;
1357 int order; 1359 int order;
1360 int num_entries;
1361 int max_entry_order;
1358 1362
1359 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); 1363 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
1360 1364
@@ -1407,14 +1411,24 @@ static __init int sctp_init(void)
1407 1411
1408 /* Size and allocate the association hash table. 1412 /* Size and allocate the association hash table.
1409 * The methodology is similar to that of the tcp hash tables. 1413 * The methodology is similar to that of the tcp hash tables.
1414 * Though not identical. Start by getting a goal size
1410 */ 1415 */
1411 if (totalram_pages >= (128 * 1024)) 1416 if (totalram_pages >= (128 * 1024))
1412 goal = totalram_pages >> (22 - PAGE_SHIFT); 1417 goal = totalram_pages >> (22 - PAGE_SHIFT);
1413 else 1418 else
1414 goal = totalram_pages >> (24 - PAGE_SHIFT); 1419 goal = totalram_pages >> (24 - PAGE_SHIFT);
1415 1420
1416 for (order = 0; (1UL << order) < goal; order++) 1421 /* Then compute the page order for said goal */
1417 ; 1422 order = get_order(goal);
1423
1424 /* Now compute the required page order for the maximum sized table we
1425 * want to create
1426 */
1427 max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
1428 sizeof(struct sctp_bind_hashbucket));
1429
1430 /* Limit the page order by that maximum hash table size */
1431 order = min(order, max_entry_order);
1418 1432
1419 /* Allocate and initialize the endpoint hash table. */ 1433 /* Allocate and initialize the endpoint hash table. */
1420 sctp_ep_hashsize = 64; 1434 sctp_ep_hashsize = 64;
@@ -1430,20 +1444,35 @@ static __init int sctp_init(void)
1430 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); 1444 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
1431 } 1445 }
1432 1446
1433 /* Allocate and initialize the SCTP port hash table. */ 1447 /* Allocate and initialize the SCTP port hash table.
1448 * Note that order is initalized to start at the max sized
1449 * table we want to support. If we can't get that many pages
1450 * reduce the order and try again
1451 */
1434 do { 1452 do {
1435 sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
1436 sizeof(struct sctp_bind_hashbucket);
1437 if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
1438 continue;
1439 sctp_port_hashtable = (struct sctp_bind_hashbucket *) 1453 sctp_port_hashtable = (struct sctp_bind_hashbucket *)
1440 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order); 1454 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
1441 } while (!sctp_port_hashtable && --order > 0); 1455 } while (!sctp_port_hashtable && --order > 0);
1456
1442 if (!sctp_port_hashtable) { 1457 if (!sctp_port_hashtable) {
1443 pr_err("Failed bind hash alloc\n"); 1458 pr_err("Failed bind hash alloc\n");
1444 status = -ENOMEM; 1459 status = -ENOMEM;
1445 goto err_bhash_alloc; 1460 goto err_bhash_alloc;
1446 } 1461 }
1462
1463 /* Now compute the number of entries that will fit in the
1464 * port hash space we allocated
1465 */
1466 num_entries = (1UL << order) * PAGE_SIZE /
1467 sizeof(struct sctp_bind_hashbucket);
1468
1469 /* And finish by rounding it down to the nearest power of two
1470 * this wastes some memory of course, but its needed because
1471 * the hash function operates based on the assumption that
1472 * that the number of entries is a power of two
1473 */
1474 sctp_port_hashsize = rounddown_pow_of_two(num_entries);
1475
1447 for (i = 0; i < sctp_port_hashsize; i++) { 1476 for (i = 0; i < sctp_port_hashsize; i++) {
1448 spin_lock_init(&sctp_port_hashtable[i].lock); 1477 spin_lock_init(&sctp_port_hashtable[i].lock);
1449 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); 1478 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
@@ -1452,7 +1481,8 @@ static __init int sctp_init(void)
1452 if (sctp_transport_hashtable_init()) 1481 if (sctp_transport_hashtable_init())
1453 goto err_thash_alloc; 1482 goto err_thash_alloc;
1454 1483
1455 pr_info("Hash tables configured (bind %d)\n", sctp_port_hashsize); 1484 pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
1485 num_entries);
1456 1486
1457 sctp_sysctl_register(); 1487 sctp_sysctl_register();
1458 1488
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 799e65b944b9..cabf586f47d7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -740,7 +740,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
740 default: 740 default:
741 printk(KERN_CRIT "%s: bad return from " 741 printk(KERN_CRIT "%s: bad return from "
742 "gss_fill_context: %zd\n", __func__, err); 742 "gss_fill_context: %zd\n", __func__, err);
743 BUG(); 743 gss_msg->msg.errno = -EIO;
744 } 744 }
745 goto err_release_msg; 745 goto err_release_msg;
746 } 746 }
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b32fd602669..273bc3a35425 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
1225 if (bp[0] == '\\' && bp[1] == 'x') { 1225 if (bp[0] == '\\' && bp[1] == 'x') {
1226 /* HEX STRING */ 1226 /* HEX STRING */
1227 bp += 2; 1227 bp += 2;
1228 while (len < bufsize) { 1228 while (len < bufsize - 1) {
1229 int h, l; 1229 int h, l;
1230 1230
1231 h = hex_to_bin(bp[0]); 1231 h = hex_to_bin(bp[0]);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index cc1251d07297..2dcd7640eeb5 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -341,6 +341,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
341 rqst->rq_reply_bytes_recvd = 0; 341 rqst->rq_reply_bytes_recvd = 0;
342 rqst->rq_bytes_sent = 0; 342 rqst->rq_bytes_sent = 0;
343 rqst->rq_xid = headerp->rm_xid; 343 rqst->rq_xid = headerp->rm_xid;
344
345 rqst->rq_private_buf.len = size;
344 set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state); 346 set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
345 347
346 buf = &rqst->rq_rcv_buf; 348 buf = &rqst->rq_rcv_buf;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0c2944fb9ae0..347cdc99ed09 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1973,8 +1973,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1973 1973
1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1975 NLM_F_MULTI, TIPC_NL_LINK_GET); 1975 NLM_F_MULTI, TIPC_NL_LINK_GET);
1976 if (!hdr) 1976 if (!hdr) {
1977 tipc_bcast_unlock(net);
1977 return -EMSGSIZE; 1978 return -EMSGSIZE;
1979 }
1978 1980
1979 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 1981 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1980 if (!attrs) 1982 if (!attrs)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa97d9649a28..9d7a16fc5ca4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -346,12 +346,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
346 skb_queue_head_init(&n->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
347 for (i = 0; i < MAX_BEARERS; i++) 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock); 348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
351 if (n->addr < temp_node->addr)
352 break;
353 }
354 list_add_tail_rcu(&n->list, &temp_node->list);
355 n->state = SELF_DOWN_PEER_LEAVING; 349 n->state = SELF_DOWN_PEER_LEAVING;
356 n->signature = INVALID_NODE_SIG; 350 n->signature = INVALID_NODE_SIG;
357 n->active_links[0] = INVALID_BEARER_ID; 351 n->active_links[0] = INVALID_BEARER_ID;
@@ -372,6 +366,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
372 tipc_node_get(n); 366 tipc_node_get(n);
373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n); 367 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
374 n->keepalive_intv = U32_MAX; 368 n->keepalive_intv = U32_MAX;
369 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
370 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
371 if (n->addr < temp_node->addr)
372 break;
373 }
374 list_add_tail_rcu(&n->list, &temp_node->list);
375exit: 375exit:
376 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
377 return n; 377 return n;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 29be035f9c65..f75f847e688d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1781,7 +1781,12 @@ restart_locked:
1781 goto out_unlock; 1781 goto out_unlock;
1782 } 1782 }
1783 1783
1784 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { 1784 /* other == sk && unix_peer(other) != sk if
1785 * - unix_peer(sk) == NULL, destination address bound to sk
1786 * - unix_peer(sk) == sk by time of get but disconnected before lock
1787 */
1788 if (other != sk &&
1789 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1785 if (timeo) { 1790 if (timeo) {
1786 timeo = unix_wait_for_peer(other, timeo); 1791 timeo = unix_wait_for_peer(other, timeo);
1787 1792
@@ -2277,13 +2282,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2277 size_t size = state->size; 2282 size_t size = state->size;
2278 unsigned int last_len; 2283 unsigned int last_len;
2279 2284
2280 err = -EINVAL; 2285 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2281 if (sk->sk_state != TCP_ESTABLISHED) 2286 err = -EINVAL;
2282 goto out; 2287 goto out;
2288 }
2283 2289
2284 err = -EOPNOTSUPP; 2290 if (unlikely(flags & MSG_OOB)) {
2285 if (flags & MSG_OOB) 2291 err = -EOPNOTSUPP;
2286 goto out; 2292 goto out;
2293 }
2287 2294
2288 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 2295 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2289 timeo = sock_rcvtimeo(sk, noblock); 2296 timeo = sock_rcvtimeo(sk, noblock);
@@ -2305,6 +2312,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2305 bool drop_skb; 2312 bool drop_skb;
2306 struct sk_buff *skb, *last; 2313 struct sk_buff *skb, *last;
2307 2314
2315redo:
2308 unix_state_lock(sk); 2316 unix_state_lock(sk);
2309 if (sock_flag(sk, SOCK_DEAD)) { 2317 if (sock_flag(sk, SOCK_DEAD)) {
2310 err = -ECONNRESET; 2318 err = -ECONNRESET;
@@ -2329,9 +2337,11 @@ again:
2329 goto unlock; 2337 goto unlock;
2330 2338
2331 unix_state_unlock(sk); 2339 unix_state_unlock(sk);
2332 err = -EAGAIN; 2340 if (!timeo) {
2333 if (!timeo) 2341 err = -EAGAIN;
2334 break; 2342 break;
2343 }
2344
2335 mutex_unlock(&u->readlock); 2345 mutex_unlock(&u->readlock);
2336 2346
2337 timeo = unix_stream_data_wait(sk, timeo, last, 2347 timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2344,7 +2354,7 @@ again:
2344 } 2354 }
2345 2355
2346 mutex_lock(&u->readlock); 2356 mutex_lock(&u->readlock);
2347 continue; 2357 goto redo;
2348unlock: 2358unlock:
2349 unix_state_unlock(sk); 2359 unix_state_unlock(sk);
2350 break; 2360 break;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c512f64d5287..4d9679701a6d 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -220,7 +220,7 @@ done:
220 return skb->len; 220 return skb->len;
221} 221}
222 222
223static struct sock *unix_lookup_by_ino(int ino) 223static struct sock *unix_lookup_by_ino(unsigned int ino)
224{ 224{
225 int i; 225 int i;
226 struct sock *sk; 226 struct sock *sk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7fd1220fbfa0..bbe65dcb9738 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1557,8 +1557,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1557 if (err < 0) 1557 if (err < 0)
1558 goto out; 1558 goto out;
1559 1559
1560 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1561
1562 while (total_written < len) { 1560 while (total_written < len) {
1563 ssize_t written; 1561 ssize_t written;
1564 1562
@@ -1578,7 +1576,9 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1578 goto out_wait; 1576 goto out_wait;
1579 1577
1580 release_sock(sk); 1578 release_sock(sk);
1579 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1581 timeout = schedule_timeout(timeout); 1580 timeout = schedule_timeout(timeout);
1581 finish_wait(sk_sleep(sk), &wait);
1582 lock_sock(sk); 1582 lock_sock(sk);
1583 if (signal_pending(current)) { 1583 if (signal_pending(current)) {
1584 err = sock_intr_errno(timeout); 1584 err = sock_intr_errno(timeout);
@@ -1588,8 +1588,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1588 goto out_wait; 1588 goto out_wait;
1589 } 1589 }
1590 1590
1591 prepare_to_wait(sk_sleep(sk), &wait,
1592 TASK_INTERRUPTIBLE);
1593 } 1591 }
1594 1592
1595 /* These checks occur both as part of and after the loop 1593 /* These checks occur both as part of and after the loop
@@ -1635,7 +1633,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1635out_wait: 1633out_wait:
1636 if (total_written > 0) 1634 if (total_written > 0)
1637 err = total_written; 1635 err = total_written;
1638 finish_wait(sk_sleep(sk), &wait);
1639out: 1636out:
1640 release_sock(sk); 1637 release_sock(sk);
1641 return err; 1638 return err;
@@ -1716,7 +1713,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1716 if (err < 0) 1713 if (err < 0)
1717 goto out; 1714 goto out;
1718 1715
1719 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1720 1716
1721 while (1) { 1717 while (1) {
1722 s64 ready = vsock_stream_has_data(vsk); 1718 s64 ready = vsock_stream_has_data(vsk);
@@ -1727,7 +1723,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1727 */ 1723 */
1728 1724
1729 err = -ENOMEM; 1725 err = -ENOMEM;
1730 goto out_wait; 1726 goto out;
1731 } else if (ready > 0) { 1727 } else if (ready > 0) {
1732 ssize_t read; 1728 ssize_t read;
1733 1729
@@ -1750,7 +1746,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1750 vsk, target, read, 1746 vsk, target, read,
1751 !(flags & MSG_PEEK), &recv_data); 1747 !(flags & MSG_PEEK), &recv_data);
1752 if (err < 0) 1748 if (err < 0)
1753 goto out_wait; 1749 goto out;
1754 1750
1755 if (read >= target || flags & MSG_PEEK) 1751 if (read >= target || flags & MSG_PEEK)
1756 break; 1752 break;
@@ -1773,7 +1769,9 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1773 break; 1769 break;
1774 1770
1775 release_sock(sk); 1771 release_sock(sk);
1772 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1776 timeout = schedule_timeout(timeout); 1773 timeout = schedule_timeout(timeout);
1774 finish_wait(sk_sleep(sk), &wait);
1777 lock_sock(sk); 1775 lock_sock(sk);
1778 1776
1779 if (signal_pending(current)) { 1777 if (signal_pending(current)) {
@@ -1783,9 +1781,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1783 err = -EAGAIN; 1781 err = -EAGAIN;
1784 break; 1782 break;
1785 } 1783 }
1786
1787 prepare_to_wait(sk_sleep(sk), &wait,
1788 TASK_INTERRUPTIBLE);
1789 } 1784 }
1790 } 1785 }
1791 1786
@@ -1816,8 +1811,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1816 err = copied; 1811 err = copied;
1817 } 1812 }
1818 1813
1819out_wait:
1820 finish_wait(sk_sleep(sk), &wait);
1821out: 1814out:
1822 release_sock(sk); 1815 release_sock(sk);
1823 return err; 1816 return err;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f8110cfd80ff..f1ab71504e1d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3249,7 +3249,7 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
3249 3249
3250static void selinux_inode_getsecid(struct inode *inode, u32 *secid) 3250static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
3251{ 3251{
3252 struct inode_security_struct *isec = inode_security(inode); 3252 struct inode_security_struct *isec = inode_security_novalidate(inode);
3253 *secid = isec->sid; 3253 *secid = isec->sid;
3254} 3254}
3255 3255
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index b9c0910fb8c4..0608f216f359 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
170 unsigned char reserved[128]; 170 unsigned char reserved[128];
171}; 171};
172 172
173#ifdef CONFIG_X86_X32
174/* x32 has a different alignment for 64bit values from ia32 */
175struct snd_ctl_elem_value_x32 {
176 struct snd_ctl_elem_id id;
177 unsigned int indirect; /* bit-field causes misalignment */
178 union {
179 s32 integer[128];
180 unsigned char data[512];
181 s64 integer64[64];
182 } value;
183 unsigned char reserved[128];
184};
185#endif /* CONFIG_X86_X32 */
173 186
174/* get the value type and count of the control */ 187/* get the value type and count of the control */
175static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, 188static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
219 232
220static int copy_ctl_value_from_user(struct snd_card *card, 233static int copy_ctl_value_from_user(struct snd_card *card,
221 struct snd_ctl_elem_value *data, 234 struct snd_ctl_elem_value *data,
222 struct snd_ctl_elem_value32 __user *data32, 235 void __user *userdata,
236 void __user *valuep,
223 int *typep, int *countp) 237 int *typep, int *countp)
224{ 238{
239 struct snd_ctl_elem_value32 __user *data32 = userdata;
225 int i, type, size; 240 int i, type, size;
226 int uninitialized_var(count); 241 int uninitialized_var(count);
227 unsigned int indirect; 242 unsigned int indirect;
@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
239 if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || 254 if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
240 type == SNDRV_CTL_ELEM_TYPE_INTEGER) { 255 type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
241 for (i = 0; i < count; i++) { 256 for (i = 0; i < count; i++) {
257 s32 __user *intp = valuep;
242 int val; 258 int val;
243 if (get_user(val, &data32->value.integer[i])) 259 if (get_user(val, &intp[i]))
244 return -EFAULT; 260 return -EFAULT;
245 data->value.integer.value[i] = val; 261 data->value.integer.value[i] = val;
246 } 262 }
@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
250 dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); 266 dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
251 return -EINVAL; 267 return -EINVAL;
252 } 268 }
253 if (copy_from_user(data->value.bytes.data, 269 if (copy_from_user(data->value.bytes.data, valuep, size))
254 data32->value.data, size))
255 return -EFAULT; 270 return -EFAULT;
256 } 271 }
257 272
@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
261} 276}
262 277
263/* restore the value to 32bit */ 278/* restore the value to 32bit */
264static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, 279static int copy_ctl_value_to_user(void __user *userdata,
280 void __user *valuep,
265 struct snd_ctl_elem_value *data, 281 struct snd_ctl_elem_value *data,
266 int type, int count) 282 int type, int count)
267{ 283{
@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
270 if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || 286 if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
271 type == SNDRV_CTL_ELEM_TYPE_INTEGER) { 287 type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
272 for (i = 0; i < count; i++) { 288 for (i = 0; i < count; i++) {
289 s32 __user *intp = valuep;
273 int val; 290 int val;
274 val = data->value.integer.value[i]; 291 val = data->value.integer.value[i];
275 if (put_user(val, &data32->value.integer[i])) 292 if (put_user(val, &intp[i]))
276 return -EFAULT; 293 return -EFAULT;
277 } 294 }
278 } else { 295 } else {
279 size = get_elem_size(type, count); 296 size = get_elem_size(type, count);
280 if (copy_to_user(data32->value.data, 297 if (copy_to_user(valuep, data->value.bytes.data, size))
281 data->value.bytes.data, size))
282 return -EFAULT; 298 return -EFAULT;
283 } 299 }
284 return 0; 300 return 0;
285} 301}
286 302
287static int snd_ctl_elem_read_user_compat(struct snd_card *card, 303static int ctl_elem_read_user(struct snd_card *card,
288 struct snd_ctl_elem_value32 __user *data32) 304 void __user *userdata, void __user *valuep)
289{ 305{
290 struct snd_ctl_elem_value *data; 306 struct snd_ctl_elem_value *data;
291 int err, type, count; 307 int err, type, count;
@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
294 if (data == NULL) 310 if (data == NULL)
295 return -ENOMEM; 311 return -ENOMEM;
296 312
297 if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) 313 err = copy_ctl_value_from_user(card, data, userdata, valuep,
314 &type, &count);
315 if (err < 0)
298 goto error; 316 goto error;
299 317
300 snd_power_lock(card); 318 snd_power_lock(card);
@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
303 err = snd_ctl_elem_read(card, data); 321 err = snd_ctl_elem_read(card, data);
304 snd_power_unlock(card); 322 snd_power_unlock(card);
305 if (err >= 0) 323 if (err >= 0)
306 err = copy_ctl_value_to_user(data32, data, type, count); 324 err = copy_ctl_value_to_user(userdata, valuep, data,
325 type, count);
307 error: 326 error:
308 kfree(data); 327 kfree(data);
309 return err; 328 return err;
310} 329}
311 330
312static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, 331static int ctl_elem_write_user(struct snd_ctl_file *file,
313 struct snd_ctl_elem_value32 __user *data32) 332 void __user *userdata, void __user *valuep)
314{ 333{
315 struct snd_ctl_elem_value *data; 334 struct snd_ctl_elem_value *data;
316 struct snd_card *card = file->card; 335 struct snd_card *card = file->card;
@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
320 if (data == NULL) 339 if (data == NULL)
321 return -ENOMEM; 340 return -ENOMEM;
322 341
323 if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) 342 err = copy_ctl_value_from_user(card, data, userdata, valuep,
343 &type, &count);
344 if (err < 0)
324 goto error; 345 goto error;
325 346
326 snd_power_lock(card); 347 snd_power_lock(card);
@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
329 err = snd_ctl_elem_write(card, file, data); 350 err = snd_ctl_elem_write(card, file, data);
330 snd_power_unlock(card); 351 snd_power_unlock(card);
331 if (err >= 0) 352 if (err >= 0)
332 err = copy_ctl_value_to_user(data32, data, type, count); 353 err = copy_ctl_value_to_user(userdata, valuep, data,
354 type, count);
333 error: 355 error:
334 kfree(data); 356 kfree(data);
335 return err; 357 return err;
336} 358}
337 359
360static int snd_ctl_elem_read_user_compat(struct snd_card *card,
361 struct snd_ctl_elem_value32 __user *data32)
362{
363 return ctl_elem_read_user(card, data32, &data32->value);
364}
365
366static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
367 struct snd_ctl_elem_value32 __user *data32)
368{
369 return ctl_elem_write_user(file, data32, &data32->value);
370}
371
372#ifdef CONFIG_X86_X32
373static int snd_ctl_elem_read_user_x32(struct snd_card *card,
374 struct snd_ctl_elem_value_x32 __user *data32)
375{
376 return ctl_elem_read_user(card, data32, &data32->value);
377}
378
379static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
380 struct snd_ctl_elem_value_x32 __user *data32)
381{
382 return ctl_elem_write_user(file, data32, &data32->value);
383}
384#endif /* CONFIG_X86_X32 */
385
338/* add or replace a user control */ 386/* add or replace a user control */
339static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, 387static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
340 struct snd_ctl_elem_info32 __user *data32, 388 struct snd_ctl_elem_info32 __user *data32,
@@ -393,6 +441,10 @@ enum {
393 SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), 441 SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
394 SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), 442 SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
395 SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), 443 SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
444#ifdef CONFIG_X86_X32
445 SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
446 SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
447#endif /* CONFIG_X86_X32 */
396}; 448};
397 449
398static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) 450static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
431 return snd_ctl_elem_add_compat(ctl, argp, 0); 483 return snd_ctl_elem_add_compat(ctl, argp, 0);
432 case SNDRV_CTL_IOCTL_ELEM_REPLACE32: 484 case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
433 return snd_ctl_elem_add_compat(ctl, argp, 1); 485 return snd_ctl_elem_add_compat(ctl, argp, 1);
486#ifdef CONFIG_X86_X32
487 case SNDRV_CTL_IOCTL_ELEM_READ_X32:
488 return snd_ctl_elem_read_user_x32(ctl->card, argp);
489 case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
490 return snd_ctl_elem_write_user_x32(ctl, argp);
491#endif /* CONFIG_X86_X32 */
434 } 492 }
435 493
436 down_read(&snd_ioctl_rwsem); 494 down_read(&snd_ioctl_rwsem);
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 9630e9f72b7b..1f64ab0c2a95 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
183 return err; 183 return err;
184} 184}
185 185
186#ifdef CONFIG_X86_X32
187/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
188static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
189 struct snd_pcm_channel_info __user *src);
190#define snd_pcm_ioctl_channel_info_x32(s, p) \
191 snd_pcm_channel_info_user(s, p)
192#endif /* CONFIG_X86_X32 */
193
186struct snd_pcm_status32 { 194struct snd_pcm_status32 {
187 s32 state; 195 s32 state;
188 struct compat_timespec trigger_tstamp; 196 struct compat_timespec trigger_tstamp;
@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
243 return err; 251 return err;
244} 252}
245 253
254#ifdef CONFIG_X86_X32
255/* X32 ABI has 64bit timespec and 64bit alignment */
256struct snd_pcm_status_x32 {
257 s32 state;
258 u32 rsvd; /* alignment */
259 struct timespec trigger_tstamp;
260 struct timespec tstamp;
261 u32 appl_ptr;
262 u32 hw_ptr;
263 s32 delay;
264 u32 avail;
265 u32 avail_max;
266 u32 overrange;
267 s32 suspended_state;
268 u32 audio_tstamp_data;
269 struct timespec audio_tstamp;
270 struct timespec driver_tstamp;
271 u32 audio_tstamp_accuracy;
272 unsigned char reserved[52-2*sizeof(struct timespec)];
273} __packed;
274
275#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
276
277static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
278 struct snd_pcm_status_x32 __user *src,
279 bool ext)
280{
281 struct snd_pcm_status status;
282 int err;
283
284 memset(&status, 0, sizeof(status));
285 /*
286 * with extension, parameters are read/write,
287 * get audio_tstamp_data from user,
288 * ignore rest of status structure
289 */
290 if (ext && get_user(status.audio_tstamp_data,
291 (u32 __user *)(&src->audio_tstamp_data)))
292 return -EFAULT;
293 err = snd_pcm_status(substream, &status);
294 if (err < 0)
295 return err;
296
297 if (clear_user(src, sizeof(*src)))
298 return -EFAULT;
299 if (put_user(status.state, &src->state) ||
300 put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
301 put_timespec(&status.tstamp, &src->tstamp) ||
302 put_user(status.appl_ptr, &src->appl_ptr) ||
303 put_user(status.hw_ptr, &src->hw_ptr) ||
304 put_user(status.delay, &src->delay) ||
305 put_user(status.avail, &src->avail) ||
306 put_user(status.avail_max, &src->avail_max) ||
307 put_user(status.overrange, &src->overrange) ||
308 put_user(status.suspended_state, &src->suspended_state) ||
309 put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
310 put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
311 put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
312 put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
313 return -EFAULT;
314
315 return err;
316}
317#endif /* CONFIG_X86_X32 */
318
246/* both for HW_PARAMS and HW_REFINE */ 319/* both for HW_PARAMS and HW_REFINE */
247static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream, 320static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
248 int refine, 321 int refine,
@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
469 return 0; 542 return 0;
470} 543}
471 544
545#ifdef CONFIG_X86_X32
546/* X32 ABI has 64bit timespec and 64bit alignment */
547struct snd_pcm_mmap_status_x32 {
548 s32 state;
549 s32 pad1;
550 u32 hw_ptr;
551 u32 pad2; /* alignment */
552 struct timespec tstamp;
553 s32 suspended_state;
554 struct timespec audio_tstamp;
555} __packed;
556
557struct snd_pcm_mmap_control_x32 {
558 u32 appl_ptr;
559 u32 avail_min;
560};
561
562struct snd_pcm_sync_ptr_x32 {
563 u32 flags;
564 u32 rsvd; /* alignment */
565 union {
566 struct snd_pcm_mmap_status_x32 status;
567 unsigned char reserved[64];
568 } s;
569 union {
570 struct snd_pcm_mmap_control_x32 control;
571 unsigned char reserved[64];
572 } c;
573} __packed;
574
575static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
576 struct snd_pcm_sync_ptr_x32 __user *src)
577{
578 struct snd_pcm_runtime *runtime = substream->runtime;
579 volatile struct snd_pcm_mmap_status *status;
580 volatile struct snd_pcm_mmap_control *control;
581 u32 sflags;
582 struct snd_pcm_mmap_control scontrol;
583 struct snd_pcm_mmap_status sstatus;
584 snd_pcm_uframes_t boundary;
585 int err;
586
587 if (snd_BUG_ON(!runtime))
588 return -EINVAL;
589
590 if (get_user(sflags, &src->flags) ||
591 get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
592 get_user(scontrol.avail_min, &src->c.control.avail_min))
593 return -EFAULT;
594 if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
595 err = snd_pcm_hwsync(substream);
596 if (err < 0)
597 return err;
598 }
599 status = runtime->status;
600 control = runtime->control;
601 boundary = recalculate_boundary(runtime);
602 if (!boundary)
603 boundary = 0x7fffffff;
604 snd_pcm_stream_lock_irq(substream);
605 /* FIXME: we should consider the boundary for the sync from app */
606 if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
607 control->appl_ptr = scontrol.appl_ptr;
608 else
609 scontrol.appl_ptr = control->appl_ptr % boundary;
610 if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
611 control->avail_min = scontrol.avail_min;
612 else
613 scontrol.avail_min = control->avail_min;
614 sstatus.state = status->state;
615 sstatus.hw_ptr = status->hw_ptr % boundary;
616 sstatus.tstamp = status->tstamp;
617 sstatus.suspended_state = status->suspended_state;
618 sstatus.audio_tstamp = status->audio_tstamp;
619 snd_pcm_stream_unlock_irq(substream);
620 if (put_user(sstatus.state, &src->s.status.state) ||
621 put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
622 put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
623 put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
624 put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
625 put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
626 put_user(scontrol.avail_min, &src->c.control.avail_min))
627 return -EFAULT;
628
629 return 0;
630}
631#endif /* CONFIG_X86_X32 */
472 632
473/* 633/*
474 */ 634 */
@@ -487,7 +647,12 @@ enum {
487 SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32), 647 SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
488 SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32), 648 SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
489 SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32), 649 SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
490 650#ifdef CONFIG_X86_X32
651 SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
652 SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
653 SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
654 SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
655#endif /* CONFIG_X86_X32 */
491}; 656};
492 657
493static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) 658static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
559 return snd_pcm_ioctl_rewind_compat(substream, argp); 724 return snd_pcm_ioctl_rewind_compat(substream, argp);
560 case SNDRV_PCM_IOCTL_FORWARD32: 725 case SNDRV_PCM_IOCTL_FORWARD32:
561 return snd_pcm_ioctl_forward_compat(substream, argp); 726 return snd_pcm_ioctl_forward_compat(substream, argp);
727#ifdef CONFIG_X86_X32
728 case SNDRV_PCM_IOCTL_STATUS_X32:
729 return snd_pcm_status_user_x32(substream, argp, false);
730 case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
731 return snd_pcm_status_user_x32(substream, argp, true);
732 case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
733 return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
734 case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
735 return snd_pcm_ioctl_channel_info_x32(substream, argp);
736#endif /* CONFIG_X86_X32 */
562 } 737 }
563 738
564 return -ENOIOCTLCMD; 739 return -ENOIOCTLCMD;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index fadd3eb8e8bb..9106d8e2300e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
74static DEFINE_RWLOCK(snd_pcm_link_rwlock); 74static DEFINE_RWLOCK(snd_pcm_link_rwlock);
75static DECLARE_RWSEM(snd_pcm_link_rwsem); 75static DECLARE_RWSEM(snd_pcm_link_rwsem);
76 76
77/* Writer in rwsem may block readers even during its waiting in queue,
78 * and this may lead to a deadlock when the code path takes read sem
79 * twice (e.g. one in snd_pcm_action_nonatomic() and another in
80 * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
81 * spin until it gets the lock.
82 */
83static inline void down_write_nonblock(struct rw_semaphore *lock)
84{
85 while (!down_write_trylock(lock))
86 cond_resched();
87}
88
77/** 89/**
78 * snd_pcm_stream_lock - Lock the PCM stream 90 * snd_pcm_stream_lock - Lock the PCM stream
79 * @substream: PCM substream 91 * @substream: PCM substream
@@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1813 res = -ENOMEM; 1825 res = -ENOMEM;
1814 goto _nolock; 1826 goto _nolock;
1815 } 1827 }
1816 down_write(&snd_pcm_link_rwsem); 1828 down_write_nonblock(&snd_pcm_link_rwsem);
1817 write_lock_irq(&snd_pcm_link_rwlock); 1829 write_lock_irq(&snd_pcm_link_rwlock);
1818 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 1830 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1819 substream->runtime->status->state != substream1->runtime->status->state || 1831 substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1860 struct snd_pcm_substream *s; 1872 struct snd_pcm_substream *s;
1861 int res = 0; 1873 int res = 0;
1862 1874
1863 down_write(&snd_pcm_link_rwsem); 1875 down_write_nonblock(&snd_pcm_link_rwsem);
1864 write_lock_irq(&snd_pcm_link_rwlock); 1876 write_lock_irq(&snd_pcm_link_rwlock);
1865 if (!snd_pcm_stream_linked(substream)) { 1877 if (!snd_pcm_stream_linked(substream)) {
1866 res = -EALREADY; 1878 res = -EALREADY;
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 5268c1f58c25..f69764d7cdd7 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -85,8 +85,7 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
85 if (err < 0) 85 if (err < 0)
86 return err; 86 return err;
87 87
88 if (put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) || 88 if (compat_put_timespec(&status.tstamp, &src->tstamp) ||
89 put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
90 put_user(status.avail, &src->avail) || 89 put_user(status.avail, &src->avail) ||
91 put_user(status.xruns, &src->xruns)) 90 put_user(status.xruns, &src->xruns))
92 return -EFAULT; 91 return -EFAULT;
@@ -94,9 +93,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
94 return 0; 93 return 0;
95} 94}
96 95
96#ifdef CONFIG_X86_X32
97/* X32 ABI has 64bit timespec and 64bit alignment */
98struct snd_rawmidi_status_x32 {
99 s32 stream;
100 u32 rsvd; /* alignment */
101 struct timespec tstamp;
102 u32 avail;
103 u32 xruns;
104 unsigned char reserved[16];
105} __attribute__((packed));
106
107#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
108
109static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
110 struct snd_rawmidi_status_x32 __user *src)
111{
112 int err;
113 struct snd_rawmidi_status status;
114
115 if (rfile->output == NULL)
116 return -EINVAL;
117 if (get_user(status.stream, &src->stream))
118 return -EFAULT;
119
120 switch (status.stream) {
121 case SNDRV_RAWMIDI_STREAM_OUTPUT:
122 err = snd_rawmidi_output_status(rfile->output, &status);
123 break;
124 case SNDRV_RAWMIDI_STREAM_INPUT:
125 err = snd_rawmidi_input_status(rfile->input, &status);
126 break;
127 default:
128 return -EINVAL;
129 }
130 if (err < 0)
131 return err;
132
133 if (put_timespec(&status.tstamp, &src->tstamp) ||
134 put_user(status.avail, &src->avail) ||
135 put_user(status.xruns, &src->xruns))
136 return -EFAULT;
137
138 return 0;
139}
140#endif /* CONFIG_X86_X32 */
141
97enum { 142enum {
98 SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), 143 SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
99 SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), 144 SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
145#ifdef CONFIG_X86_X32
146 SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
147#endif /* CONFIG_X86_X32 */
100}; 148};
101 149
102static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) 150static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +163,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
115 return snd_rawmidi_ioctl_params_compat(rfile, argp); 163 return snd_rawmidi_ioctl_params_compat(rfile, argp);
116 case SNDRV_RAWMIDI_IOCTL_STATUS32: 164 case SNDRV_RAWMIDI_IOCTL_STATUS32:
117 return snd_rawmidi_ioctl_status_compat(rfile, argp); 165 return snd_rawmidi_ioctl_status_compat(rfile, argp);
166#ifdef CONFIG_X86_X32
167 case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
168 return snd_rawmidi_ioctl_status_x32(rfile, argp);
169#endif /* CONFIG_X86_X32 */
118 } 170 }
119 return -ENOIOCTLCMD; 171 return -ENOIOCTLCMD;
120} 172}
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 8db156b207f1..8cdf489df80e 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -149,8 +149,6 @@ odev_release(struct inode *inode, struct file *file)
149 if ((dp = file->private_data) == NULL) 149 if ((dp = file->private_data) == NULL)
150 return 0; 150 return 0;
151 151
152 snd_seq_oss_drain_write(dp);
153
154 mutex_lock(&register_mutex); 152 mutex_lock(&register_mutex);
155 snd_seq_oss_release(dp); 153 snd_seq_oss_release(dp);
156 mutex_unlock(&register_mutex); 154 mutex_unlock(&register_mutex);
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index b43924325249..d7b4d016b547 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
127unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); 127unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
128 128
129void snd_seq_oss_reset(struct seq_oss_devinfo *dp); 129void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
130void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
131 130
132/* */ 131/* */
133void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); 132void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 6779e82b46dd..92c96a95a903 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
436 436
437 437
438/* 438/*
439 * Wait until the queue is empty (if we don't have nonblock)
440 */
441void
442snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
443{
444 if (! dp->timer->running)
445 return;
446 if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
447 dp->writeq) {
448 while (snd_seq_oss_writeq_sync(dp->writeq))
449 ;
450 }
451}
452
453
454/*
455 * reset sequencer devices 439 * reset sequencer devices
456 */ 440 */
457void 441void
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 801076687bb1..c850345c43b5 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
383 383
384 if (snd_BUG_ON(!pool)) 384 if (snd_BUG_ON(!pool))
385 return -EINVAL; 385 return -EINVAL;
386 if (pool->ptr) /* should be atomic? */
387 return 0;
388 386
389 pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size); 387 cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
390 if (!pool->ptr) 388 if (!cellptr)
391 return -ENOMEM; 389 return -ENOMEM;
392 390
393 /* add new cells to the free cell list */ 391 /* add new cells to the free cell list */
394 spin_lock_irqsave(&pool->lock, flags); 392 spin_lock_irqsave(&pool->lock, flags);
393 if (pool->ptr) {
394 spin_unlock_irqrestore(&pool->lock, flags);
395 vfree(cellptr);
396 return 0;
397 }
398
399 pool->ptr = cellptr;
395 pool->free = NULL; 400 pool->free = NULL;
396 401
397 for (cell = 0; cell < pool->size; cell++) { 402 for (cell = 0; cell < pool->size; cell++) {
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 921fb2bd8fad..fe686ee41c6d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
535 bool is_src, bool ack) 535 bool is_src, bool ack)
536{ 536{
537 struct snd_seq_port_subs_info *grp; 537 struct snd_seq_port_subs_info *grp;
538 struct list_head *list;
539 bool empty;
538 540
539 grp = is_src ? &port->c_src : &port->c_dest; 541 grp = is_src ? &port->c_src : &port->c_dest;
542 list = is_src ? &subs->src_list : &subs->dest_list;
540 down_write(&grp->list_mutex); 543 down_write(&grp->list_mutex);
541 write_lock_irq(&grp->list_lock); 544 write_lock_irq(&grp->list_lock);
542 if (is_src) 545 empty = list_empty(list);
543 list_del(&subs->src_list); 546 if (!empty)
544 else 547 list_del_init(list);
545 list_del(&subs->dest_list);
546 grp->exclusive = 0; 548 grp->exclusive = 0;
547 write_unlock_irq(&grp->list_lock); 549 write_unlock_irq(&grp->list_lock);
548 up_write(&grp->list_mutex); 550 up_write(&grp->list_mutex);
549 551
550 unsubscribe_port(client, port, grp, &subs->info, ack); 552 if (!empty)
553 unsubscribe_port(client, port, grp, &subs->info, ack);
551} 554}
552 555
553/* connect two ports */ 556/* connect two ports */
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index e05802ae6e1b..2e908225d754 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
70 struct snd_timer_status32 __user *_status) 70 struct snd_timer_status32 __user *_status)
71{ 71{
72 struct snd_timer_user *tu; 72 struct snd_timer_user *tu;
73 struct snd_timer_status status; 73 struct snd_timer_status32 status;
74 74
75 tu = file->private_data; 75 tu = file->private_data;
76 if (snd_BUG_ON(!tu->timeri)) 76 if (snd_BUG_ON(!tu->timeri))
77 return -ENXIO; 77 return -ENXIO;
78 memset(&status, 0, sizeof(status)); 78 memset(&status, 0, sizeof(status));
79 status.tstamp = tu->tstamp; 79 status.tstamp.tv_sec = tu->tstamp.tv_sec;
80 status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
80 status.resolution = snd_timer_resolution(tu->timeri); 81 status.resolution = snd_timer_resolution(tu->timeri);
81 status.lost = tu->timeri->lost; 82 status.lost = tu->timeri->lost;
82 status.overrun = tu->overrun; 83 status.overrun = tu->overrun;
@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
88 return 0; 89 return 0;
89} 90}
90 91
92#ifdef CONFIG_X86_X32
93/* X32 ABI has the same struct as x86-64 */
94#define snd_timer_user_status_x32(file, s) \
95 snd_timer_user_status(file, s)
96#endif /* CONFIG_X86_X32 */
97
91/* 98/*
92 */ 99 */
93 100
94enum { 101enum {
95 SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), 102 SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
96 SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), 103 SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
104#ifdef CONFIG_X86_X32
105 SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
106#endif /* CONFIG_X86_X32 */
97}; 107};
98 108
99static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) 109static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
122 return snd_timer_user_info_compat(file, argp); 132 return snd_timer_user_info_compat(file, argp);
123 case SNDRV_TIMER_IOCTL_STATUS32: 133 case SNDRV_TIMER_IOCTL_STATUS32:
124 return snd_timer_user_status_compat(file, argp); 134 return snd_timer_user_status_compat(file, argp);
135#ifdef CONFIG_X86_X32
136 case SNDRV_TIMER_IOCTL_STATUS_X32:
137 return snd_timer_user_status_x32(file, argp);
138#endif /* CONFIG_X86_X32 */
125 } 139 }
126 return -ENOIOCTLCMD; 140 return -ENOIOCTLCMD;
127} 141}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index b5a17cb510a0..8c486235c905 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -426,18 +426,22 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_chip);
426 * @bus: HD-audio core bus 426 * @bus: HD-audio core bus
427 * @status: INTSTS register value 427 * @status: INTSTS register value
428 * @ask: callback to be called for woken streams 428 * @ask: callback to be called for woken streams
429 *
430 * Returns the bits of handled streams, or zero if no stream is handled.
429 */ 431 */
430void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, 432int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
431 void (*ack)(struct hdac_bus *, 433 void (*ack)(struct hdac_bus *,
432 struct hdac_stream *)) 434 struct hdac_stream *))
433{ 435{
434 struct hdac_stream *azx_dev; 436 struct hdac_stream *azx_dev;
435 u8 sd_status; 437 u8 sd_status;
438 int handled = 0;
436 439
437 list_for_each_entry(azx_dev, &bus->stream_list, list) { 440 list_for_each_entry(azx_dev, &bus->stream_list, list) {
438 if (status & azx_dev->sd_int_sta_mask) { 441 if (status & azx_dev->sd_int_sta_mask) {
439 sd_status = snd_hdac_stream_readb(azx_dev, SD_STS); 442 sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
440 snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); 443 snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
444 handled |= 1 << azx_dev->index;
441 if (!azx_dev->substream || !azx_dev->running || 445 if (!azx_dev->substream || !azx_dev->running ||
442 !(sd_status & SD_INT_COMPLETE)) 446 !(sd_status & SD_INT_COMPLETE))
443 continue; 447 continue;
@@ -445,6 +449,7 @@ void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
445 ack(bus, azx_dev); 449 ack(bus, azx_dev);
446 } 450 }
447 } 451 }
452 return handled;
448} 453}
449EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq); 454EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);
450 455
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 37cf9cee9835..27de8015717d 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -930,6 +930,8 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
930 struct azx *chip = dev_id; 930 struct azx *chip = dev_id;
931 struct hdac_bus *bus = azx_bus(chip); 931 struct hdac_bus *bus = azx_bus(chip);
932 u32 status; 932 u32 status;
933 bool active, handled = false;
934 int repeat = 0; /* count for avoiding endless loop */
933 935
934#ifdef CONFIG_PM 936#ifdef CONFIG_PM
935 if (azx_has_pm_runtime(chip)) 937 if (azx_has_pm_runtime(chip))
@@ -939,33 +941,36 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
939 941
940 spin_lock(&bus->reg_lock); 942 spin_lock(&bus->reg_lock);
941 943
942 if (chip->disabled) { 944 if (chip->disabled)
943 spin_unlock(&bus->reg_lock); 945 goto unlock;
944 return IRQ_NONE;
945 }
946
947 status = azx_readl(chip, INTSTS);
948 if (status == 0 || status == 0xffffffff) {
949 spin_unlock(&bus->reg_lock);
950 return IRQ_NONE;
951 }
952 946
953 snd_hdac_bus_handle_stream_irq(bus, status, stream_update); 947 do {
948 status = azx_readl(chip, INTSTS);
949 if (status == 0 || status == 0xffffffff)
950 break;
954 951
955 /* clear rirb int */ 952 handled = true;
956 status = azx_readb(chip, RIRBSTS); 953 active = false;
957 if (status & RIRB_INT_MASK) { 954 if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
958 if (status & RIRB_INT_RESPONSE) { 955 active = true;
959 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) 956
960 udelay(80); 957 /* clear rirb int */
961 snd_hdac_bus_update_rirb(bus); 958 status = azx_readb(chip, RIRBSTS);
959 if (status & RIRB_INT_MASK) {
960 active = true;
961 if (status & RIRB_INT_RESPONSE) {
962 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
963 udelay(80);
964 snd_hdac_bus_update_rirb(bus);
965 }
966 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
962 } 967 }
963 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); 968 } while (active && ++repeat < 10);
964 }
965 969
970 unlock:
966 spin_unlock(&bus->reg_lock); 971 spin_unlock(&bus->reg_lock);
967 972
968 return IRQ_HANDLED; 973 return IRQ_RETVAL(handled);
969} 974}
970EXPORT_SYMBOL_GPL(azx_interrupt); 975EXPORT_SYMBOL_GPL(azx_interrupt);
971 976
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4045dca3d699..e5240cb3749f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -363,7 +363,10 @@ enum {
363 ((pci)->device == 0x0d0c) || \ 363 ((pci)->device == 0x0d0c) || \
364 ((pci)->device == 0x160c)) 364 ((pci)->device == 0x160c))
365 365
366#define IS_BROXTON(pci) ((pci)->device == 0x5a98) 366#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
367#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
368#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
369#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
367 370
368static char *driver_short_names[] = { 371static char *driver_short_names[] = {
369 [AZX_DRIVER_ICH] = "HDA Intel", 372 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -540,13 +543,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
540 543
541 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 544 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
542 snd_hdac_set_codec_wakeup(bus, true); 545 snd_hdac_set_codec_wakeup(bus, true);
543 if (IS_BROXTON(pci)) { 546 if (IS_SKL_PLUS(pci)) {
544 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); 547 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
545 val = val & ~INTEL_HDA_CGCTL_MISCBDCGE; 548 val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
546 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); 549 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
547 } 550 }
548 azx_init_chip(chip, full_reset); 551 azx_init_chip(chip, full_reset);
549 if (IS_BROXTON(pci)) { 552 if (IS_SKL_PLUS(pci)) {
550 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); 553 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
551 val = val | INTEL_HDA_CGCTL_MISCBDCGE; 554 val = val | INTEL_HDA_CGCTL_MISCBDCGE;
552 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); 555 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -555,7 +558,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
555 snd_hdac_set_codec_wakeup(bus, false); 558 snd_hdac_set_codec_wakeup(bus, false);
556 559
557 /* reduce dma latency to avoid noise */ 560 /* reduce dma latency to avoid noise */
558 if (IS_BROXTON(pci)) 561 if (IS_BXT(pci))
559 bxt_reduce_dma_latency(chip); 562 bxt_reduce_dma_latency(chip);
560} 563}
561 564
@@ -977,11 +980,6 @@ static int azx_resume(struct device *dev)
977/* put codec down to D3 at hibernation for Intel SKL+; 980/* put codec down to D3 at hibernation for Intel SKL+;
978 * otherwise BIOS may still access the codec and screw up the driver 981 * otherwise BIOS may still access the codec and screw up the driver
979 */ 982 */
980#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
981#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
982#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
983#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
984
985static int azx_freeze_noirq(struct device *dev) 983static int azx_freeze_noirq(struct device *dev)
986{ 984{
987 struct pci_dev *pci = to_pci_dev(dev); 985 struct pci_dev *pci = to_pci_dev(dev);
@@ -2168,10 +2166,10 @@ static void azx_remove(struct pci_dev *pci)
2168 struct hda_intel *hda; 2166 struct hda_intel *hda;
2169 2167
2170 if (card) { 2168 if (card) {
2171 /* flush the pending probing work */ 2169 /* cancel the pending probing work */
2172 chip = card->private_data; 2170 chip = card->private_data;
2173 hda = container_of(chip, struct hda_intel, chip); 2171 hda = container_of(chip, struct hda_intel, chip);
2174 flush_work(&hda->probe_work); 2172 cancel_work_sync(&hda->probe_work);
2175 2173
2176 snd_card_free(card); 2174 snd_card_free(card);
2177 } 2175 }
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 8ee78dbd4c60..bcbc4ee10130 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2477,13 +2477,6 @@ static int patch_generic_hdmi(struct hda_codec *codec)
2477 is_broxton(codec)) 2477 is_broxton(codec))
2478 codec->core.link_power_control = 1; 2478 codec->core.link_power_control = 1;
2479 2479
2480 if (codec_has_acomp(codec)) {
2481 codec->depop_delay = 0;
2482 spec->i915_audio_ops.audio_ptr = codec;
2483 spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
2484 snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
2485 }
2486
2487 if (hdmi_parse_codec(codec) < 0) { 2480 if (hdmi_parse_codec(codec) < 0) {
2488 if (spec->i915_bound) 2481 if (spec->i915_bound)
2489 snd_hdac_i915_exit(&codec->bus->core); 2482 snd_hdac_i915_exit(&codec->bus->core);
@@ -2505,6 +2498,18 @@ static int patch_generic_hdmi(struct hda_codec *codec)
2505 2498
2506 init_channel_allocations(); 2499 init_channel_allocations();
2507 2500
2501 if (codec_has_acomp(codec)) {
2502 codec->depop_delay = 0;
2503 spec->i915_audio_ops.audio_ptr = codec;
2504 /* intel_audio_codec_enable() or intel_audio_codec_disable()
2505 * will call pin_eld_notify with using audio_ptr pointer
2506 * We need make sure audio_ptr is really setup
2507 */
2508 wmb();
2509 spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
2510 snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
2511 }
2512
2508 return 0; 2513 return 0;
2509} 2514}
2510 2515
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index efd4980cffb8..93d2156b6241 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3801,6 +3801,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3801 3801
3802static void alc_headset_mode_default(struct hda_codec *codec) 3802static void alc_headset_mode_default(struct hda_codec *codec)
3803{ 3803{
3804 static struct coef_fw coef0225[] = {
3805 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
3806 {}
3807 };
3804 static struct coef_fw coef0255[] = { 3808 static struct coef_fw coef0255[] = {
3805 WRITE_COEF(0x45, 0xc089), 3809 WRITE_COEF(0x45, 0xc089),
3806 WRITE_COEF(0x45, 0xc489), 3810 WRITE_COEF(0x45, 0xc489),
@@ -3842,6 +3846,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3842 }; 3846 };
3843 3847
3844 switch (codec->core.vendor_id) { 3848 switch (codec->core.vendor_id) {
3849 case 0x10ec0225:
3850 alc_process_coef_fw(codec, coef0225);
3851 break;
3845 case 0x10ec0255: 3852 case 0x10ec0255:
3846 case 0x10ec0256: 3853 case 0x10ec0256:
3847 alc_process_coef_fw(codec, coef0255); 3854 alc_process_coef_fw(codec, coef0255);
@@ -4749,6 +4756,9 @@ enum {
4749 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4756 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4750 ALC293_FIXUP_LENOVO_SPK_NOISE, 4757 ALC293_FIXUP_LENOVO_SPK_NOISE,
4751 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 4758 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
4759 ALC255_FIXUP_DELL_SPK_NOISE,
4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
4761 ALC280_FIXUP_HP_HEADSET_MIC,
4752}; 4762};
4753 4763
4754static const struct hda_fixup alc269_fixups[] = { 4764static const struct hda_fixup alc269_fixups[] = {
@@ -5368,6 +5378,29 @@ static const struct hda_fixup alc269_fixups[] = {
5368 .type = HDA_FIXUP_FUNC, 5378 .type = HDA_FIXUP_FUNC,
5369 .v.func = alc233_fixup_lenovo_line2_mic_hotkey, 5379 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
5370 }, 5380 },
5381 [ALC255_FIXUP_DELL_SPK_NOISE] = {
5382 .type = HDA_FIXUP_FUNC,
5383 .v.func = alc_fixup_disable_aamix,
5384 .chained = true,
5385 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
5386 },
5387 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
5388 .type = HDA_FIXUP_VERBS,
5389 .v.verbs = (const struct hda_verb[]) {
5390 /* Disable pass-through path for FRONT 14h */
5391 { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
5392 { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
5393 {}
5394 },
5395 .chained = true,
5396 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
5397 },
5398 [ALC280_FIXUP_HP_HEADSET_MIC] = {
5399 .type = HDA_FIXUP_FUNC,
5400 .v.func = alc_fixup_disable_aamix,
5401 .chained = true,
5402 .chain_id = ALC269_FIXUP_HEADSET_MIC,
5403 },
5371}; 5404};
5372 5405
5373static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5406static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5379,6 +5412,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5379 SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), 5412 SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
5380 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), 5413 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
5381 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 5414 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
5415 SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
5382 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), 5416 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
5383 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 5417 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
5384 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), 5418 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
@@ -5410,6 +5444,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5410 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5444 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5411 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5445 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5412 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5446 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5447 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5413 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5448 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5414 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5449 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5415 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5450 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5470,6 +5505,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5470 SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5505 SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5471 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5506 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5472 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5507 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5508 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
5473 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5509 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5474 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5510 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5475 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5511 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5638,10 +5674,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5638 {0x21, 0x03211020} 5674 {0x21, 0x03211020}
5639 5675
5640static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { 5676static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5641 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 5677 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5642 ALC225_STANDARD_PINS, 5678 ALC225_STANDARD_PINS,
5643 {0x14, 0x901701a0}), 5679 {0x14, 0x901701a0}),
5644 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 5680 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5645 ALC225_STANDARD_PINS, 5681 ALC225_STANDARD_PINS,
5646 {0x14, 0x901701b0}), 5682 {0x14, 0x901701b0}),
5647 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, 5683 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 2875b4f6d8c9..7c8941b8b2de 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
2879{ 2879{
2880 struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); 2880 struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
2881 2881
2882 ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); 2882 ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
2883 return 0; 2883 return 0;
2884} 2884}
2885 2885
@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
2891 2891
2892 if (!snd_hdsp_use_is_exclusive(hdsp)) 2892 if (!snd_hdsp_use_is_exclusive(hdsp))
2893 return -EBUSY; 2893 return -EBUSY;
2894 val = ucontrol->value.enumerated.item[0]; 2894 val = ucontrol->value.integer.value[0];
2895 spin_lock_irq(&hdsp->lock); 2895 spin_lock_irq(&hdsp->lock);
2896 if (val != hdsp_dds_offset(hdsp)) 2896 if (val != hdsp_dds_offset(hdsp))
2897 change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; 2897 change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 8bc8016c173d..a4a999a0317e 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
1601{ 1601{
1602 u64 n; 1602 u64 n;
1603 1603
1604 if (snd_BUG_ON(rate <= 0))
1605 return;
1606
1604 if (rate >= 112000) 1607 if (rate >= 112000)
1605 rate /= 4; 1608 rate /= 4;
1606 else if (rate >= 56000) 1609 else if (rate >= 56000)
@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
2215 } else { 2218 } else {
2216 /* slave mode, return external sample rate */ 2219 /* slave mode, return external sample rate */
2217 rate = hdspm_external_sample_rate(hdspm); 2220 rate = hdspm_external_sample_rate(hdspm);
2221 if (!rate)
2222 rate = hdspm->system_sample_rate;
2218 } 2223 }
2219 } 2224 }
2220 2225
@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
2260 ucontrol) 2265 ucontrol)
2261{ 2266{
2262 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); 2267 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
2268 int rate = ucontrol->value.integer.value[0];
2263 2269
2264 hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); 2270 if (rate < 27000 || rate > 207000)
2271 return -EINVAL;
2272 hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
2265 return 0; 2273 return 0;
2266} 2274}
2267 2275
@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
4449{ 4457{
4450 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); 4458 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
4451 4459
4452 ucontrol->value.enumerated.item[0] = hdspm->tco->term; 4460 ucontrol->value.integer.value[0] = hdspm->tco->term;
4453 4461
4454 return 0; 4462 return 0;
4455} 4463}
@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
4460{ 4468{
4461 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); 4469 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
4462 4470
4463 if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { 4471 if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
4464 hdspm->tco->term = ucontrol->value.enumerated.item[0]; 4472 hdspm->tco->term = ucontrol->value.integer.value[0];
4465 4473
4466 hdspm_tco_write(hdspm); 4474 hdspm_tco_write(hdspm);
4467 4475
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4f6ce1cac8e2..c458d60d5030 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1124 case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */ 1124 case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
1125 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ 1125 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
1126 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1126 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
1127 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
1127 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1128 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1128 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1129 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
1129 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ 1130 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 90bd2ea41032..b3281dcd4a5d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -217,13 +217,16 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
217 return rc; 217 return rc;
218} 218}
219 219
220#define NFIT_TEST_ARS_RECORDS 4
221
220static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, 222static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
221 unsigned int buf_len) 223 unsigned int buf_len)
222{ 224{
223 if (buf_len < sizeof(*nd_cmd)) 225 if (buf_len < sizeof(*nd_cmd))
224 return -EINVAL; 226 return -EINVAL;
225 227
226 nd_cmd->max_ars_out = 256; 228 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
229 + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
227 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; 230 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
228 231
229 return 0; 232 return 0;
@@ -246,7 +249,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
246 if (buf_len < sizeof(*nd_cmd)) 249 if (buf_len < sizeof(*nd_cmd))
247 return -EINVAL; 250 return -EINVAL;
248 251
249 nd_cmd->out_length = 256; 252 nd_cmd->out_length = sizeof(struct nd_cmd_ars_status);
253 /* TODO: emit error records */
250 nd_cmd->num_records = 0; 254 nd_cmd->num_records = 0;
251 nd_cmd->address = 0; 255 nd_cmd->address = 0;
252 nd_cmd->length = -1ULL; 256 nd_cmd->length = -1ULL;
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
index 773e276ff90b..1e1abe0ad354 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -39,28 +39,23 @@ instance_slam() {
39} 39}
40 40
41instance_slam & 41instance_slam &
42x=`jobs -l` 42p1=$!
43p1=`echo $x | cut -d' ' -f2`
44echo $p1 43echo $p1
45 44
46instance_slam & 45instance_slam &
47x=`jobs -l | tail -1` 46p2=$!
48p2=`echo $x | cut -d' ' -f2`
49echo $p2 47echo $p2
50 48
51instance_slam & 49instance_slam &
52x=`jobs -l | tail -1` 50p3=$!
53p3=`echo $x | cut -d' ' -f2`
54echo $p3 51echo $p3
55 52
56instance_slam & 53instance_slam &
57x=`jobs -l | tail -1` 54p4=$!
58p4=`echo $x | cut -d' ' -f2`
59echo $p4 55echo $p4
60 56
61instance_slam & 57instance_slam &
62x=`jobs -l | tail -1` 58p5=$!
63p5=`echo $x | cut -d' ' -f2`
64echo $p5 59echo $p5
65 60
66ls -lR >/dev/null 61ls -lR >/dev/null
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca185c471..ea6064696fe4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
143 * Check if there was a change in the timer state (should we raise or lower 143 * Check if there was a change in the timer state (should we raise or lower
144 * the line level to the GIC). 144 * the line level to the GIC).
145 */ 145 */
146static void kvm_timer_update_state(struct kvm_vcpu *vcpu) 146static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
147{ 147{
148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
149 149
@@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
154 * until we call this function from kvm_timer_flush_hwstate. 154 * until we call this function from kvm_timer_flush_hwstate.
155 */ 155 */
156 if (!vgic_initialized(vcpu->kvm)) 156 if (!vgic_initialized(vcpu->kvm))
157 return; 157 return -ENODEV;
158 158
159 if (kvm_timer_should_fire(vcpu) != timer->irq.level) 159 if (kvm_timer_should_fire(vcpu) != timer->irq.level)
160 kvm_timer_update_irq(vcpu, !timer->irq.level); 160 kvm_timer_update_irq(vcpu, !timer->irq.level);
161
162 return 0;
161} 163}
162 164
163/* 165/*
@@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
218 bool phys_active; 220 bool phys_active;
219 int ret; 221 int ret;
220 222
221 kvm_timer_update_state(vcpu); 223 if (kvm_timer_update_state(vcpu))
224 return;
222 225
223 /* 226 /*
224 * If we enter the guest with the virtual input level to the VGIC 227 * If we enter the guest with the virtual input level to the VGIC
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 043032c6a5a4..00429b392c61 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
1875static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) 1875static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
1876{ 1876{
1877 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1877 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1878 1878 int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
1879 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; 1879 int sz = nr_longs * sizeof(unsigned long);
1880 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); 1880 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
1881 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); 1881 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
1882 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); 1882 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 353159922456..db2dd3335c6a 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -172,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
172 * do alloc nowait since if we are going to sleep anyway we 172 * do alloc nowait since if we are going to sleep anyway we
173 * may as well sleep faulting in page 173 * may as well sleep faulting in page
174 */ 174 */
175 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); 175 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
176 if (!work) 176 if (!work)
177 return 0; 177 return 0;
178 178