aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap3
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt6
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt1
-rw-r--r--Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/hisilicon-pcie.txt10
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt39
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l35.txt180
-rw-r--r--Documentation/devicetree/bindings/sound/dioo,dio2125.txt12
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es7134.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,ssi.txt34
-rw-r--r--Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt42
-rw-r--r--Documentation/devicetree/bindings/sound/max98925.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/max98926.txt32
-rw-r--r--Documentation/devicetree/bindings/sound/max9892x.txt41
-rw-r--r--Documentation/devicetree/bindings/sound/mt2701-wm8960.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/nau8824.txt88
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-i2s.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,odroid.txt57
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.txt89
-rw-r--r--Documentation/devicetree/bindings/sound/tas2552.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/wm8903.txt13
-rw-r--r--Documentation/devicetree/bindings/sound/zte,tdm.txt30
-rw-r--r--Documentation/extcon/intel-int3496.txt5
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/porting6
-rw-r--r--Documentation/filesystems/vfs.txt3
-rw-r--r--Documentation/gcc-plugins.txt4
-rw-r--r--Documentation/pinctrl.txt8
-rw-r--r--Documentation/process/stable-kernel-rules.rst2
-rw-r--r--Documentation/virtual/kvm/api.txt63
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt6
-rw-r--r--MAINTAINERS62
-rw-r--r--Makefile16
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs_idu.dtsi21
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi20
-rw-r--r--arch/arc/include/asm/kprobes.h4
-rw-r--r--arch/arc/kernel/entry-arcv2.S12
-rw-r--r--arch/arc/kernel/setup.c16
-rw-r--r--arch/arc/mm/cache.c3
-rw-r--r--arch/arm/boot/dts/am335x-baltos.dtsi2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts1
-rw-r--r--arch/arm/boot/dts/am335x-pcm-953.dtsi4
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi14
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts5
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts1
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts1
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-udoo-neo.dtsi5
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi19
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi9
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts9
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi12
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi7
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/mmu.c23
-rw-r--r--arch/arm/mach-at91/pm.c18
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c154
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c10
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S3
-rw-r--r--arch/arm/mach-omap2/omap-hotplug.c2
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c22
-rw-r--r--arch/arm/mach-omap2/omap-smc.S1
-rw-r--r--arch/arm/mach-omap2/omap-smp.c90
-rw-r--r--arch/arm/mach-omap2/omap_device.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c45
-rw-r--r--arch/arm/mach-orion5x/Kconfig1
-rw-r--r--arch/arm/mm/dma-mapping.c20
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm/plat-orion/common.c5
-rw-r--r--arch/arm/probes/kprobes/core.c49
-rw-r--r--arch/arm/probes/kprobes/test-core.c11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi11
-rw-r--r--arch/arm64/include/asm/current.h2
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h8
-rw-r--r--arch/arm64/kernel/kaslr.c10
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/vdso/.gitignore1
-rw-r--r--arch/arm64/mm/fault.c42
-rw-r--r--arch/arm64/mm/hugetlbpage.c14
-rw-r--r--arch/c6x/kernel/ptrace.c41
-rw-r--r--arch/h8300/kernel/ptrace.c8
-rw-r--r--arch/ia64/include/asm/asm-prototypes.h29
-rw-r--r--arch/ia64/lib/Makefile16
-rw-r--r--arch/m68k/configs/amiga_defconfig14
-rw-r--r--arch/m68k/configs/apollo_defconfig14
-rw-r--r--arch/m68k/configs/atari_defconfig14
-rw-r--r--arch/m68k/configs/bvme6000_defconfig14
-rw-r--r--arch/m68k/configs/hp300_defconfig14
-rw-r--r--arch/m68k/configs/mac_defconfig14
-rw-r--r--arch/m68k/configs/multi_defconfig14
-rw-r--r--arch/m68k/configs/mvme147_defconfig14
-rw-r--r--arch/m68k/configs/mvme16x_defconfig14
-rw-r--r--arch/m68k/configs/q40_defconfig14
-rw-r--r--arch/m68k/configs/sun3_defconfig14
-rw-r--r--arch/m68k/configs/sun3x_defconfig14
-rw-r--r--arch/m68k/include/asm/bitops.h2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/metag/include/asm/uaccess.h15
-rw-r--r--arch/metag/kernel/ptrace.c19
-rw-r--r--arch/metag/lib/usercopy.c312
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/include/asm/fpu.h1
-rw-r--r--arch/mips/include/asm/irq.h15
-rw-r--r--arch/mips/include/asm/spinlock.h8
-rw-r--r--arch/mips/include/uapi/asm/unistd.h15
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/cps-vec.S2
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/genex.S12
-rw-r--r--arch/mips/kernel/process.c56
-rw-r--r--arch/mips/kernel/ptrace.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/kernel/traps.c17
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c2
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c25
-rw-r--r--arch/mips/ralink/rt3883.c4
-rw-r--r--arch/nios2/kernel/prom.c7
-rw-r--r--arch/nios2/kernel/setup.c3
-rw-r--r--arch/parisc/include/asm/uaccess.h139
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/lib/Makefile2
-rw-r--r--arch/parisc/lib/fixup.S98
-rw-r--r--arch/parisc/lib/lusercopy.S319
-rw-r--r--arch/parisc/lib/memcpy.c461
-rw-r--r--arch/parisc/mm/fault.c17
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c3
-rw-r--r--arch/powerpc/include/asm/exception-64s.h8
-rw-r--r--arch/powerpc/kernel/align.c27
-rw-r--r--arch/powerpc/kernel/entry_64.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/idle_book3s.S20
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/mm/hash_native_64.c7
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/s390/boot/compressed/misc.c35
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/include/asm/sections.h1
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/gaccess.c7
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/page_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h15
-rw-r--r--arch/sparc/include/asm/processor_32.h6
-rw-r--r--arch/sparc/include/asm/processor_64.h4
-rw-r--r--arch/sparc/kernel/head_64.S4
-rw-r--r--arch/sparc/kernel/misctrap.S1
-rw-r--r--arch/sparc/kernel/ptrace_64.c2
-rw-r--r--arch/sparc/kernel/rtrap_64.S1
-rw-r--r--arch/sparc/kernel/spiterrs.S1
-rw-r--r--arch/sparc/kernel/sun4v_tlb_miss.S1
-rw-r--r--arch/sparc/kernel/urtt_fill.S1
-rw-r--r--arch/sparc/kernel/winfixup.S2
-rw-r--r--arch/sparc/lib/NG2memcpy.S4
-rw-r--r--arch/sparc/lib/NG4memcpy.S1
-rw-r--r--arch/sparc/lib/NG4memset.S1
-rw-r--r--arch/sparc/lib/NGmemcpy.S1
-rw-r--r--arch/sparc/mm/hugetlbpage.c25
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/sparc/mm/srmmu.c1
-rw-r--r--arch/sparc/mm/tlb.c6
-rw-r--r--arch/sparc/mm/tsb.c4
-rw-r--r--arch/x86/Makefile35
-rw-r--r--arch/x86/Makefile_32.cpu18
-rw-r--r--arch/x86/boot/compressed/error.c1
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c11
-rw-r--r--arch/x86/events/core.c9
-rw-r--r--arch/x86/events/intel/lbr.c3
-rw-r--r--arch/x86/include/asm/elf.h2
-rw-r--r--arch/x86/include/asm/kvm_page_track.h1
-rw-r--r--arch/x86/include/asm/pmem.h42
-rw-r--r--arch/x86/include/asm/timer.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h8
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_schemata.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-genpool.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c24
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/ftrace.c6
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/signal_compat.c4
-rw-r--r--arch/x86/kernel/traps.c4
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/ioapic.c3
-rw-r--r--arch/x86/kvm/page_track.c8
-rw-r--r--arch/x86/kvm/svm.c3
-rw-r--r--arch/x86/kvm/vmx.c59
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/init.c41
-rw-r--r--arch/x86/mm/kaslr.c4
-rw-r--r--arch/x86/platform/efi/quirks.c4
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/xtensa/include/asm/page.h13
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h5
-rw-r--r--arch/xtensa/kernel/traps.c6
-rw-r--r--block/blk-mq-sched.c181
-rw-r--r--block/blk-mq-sched.h25
-rw-r--r--block/blk-mq.c114
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-stat.c4
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/elevator.c126
-rw-r--r--crypto/ahash.c79
-rw-r--r--crypto/algif_aead.c12
-rw-r--r--crypto/lrw.c23
-rw-r--r--crypto/xts.c23
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_platform.c8
-rw-r--r--drivers/acpi/acpica/utresrc.c17
-rw-r--r--drivers/acpi/apei/ghes.c1
-rw-r--r--drivers/acpi/glue.c12
-rw-r--r--drivers/acpi/ioapic.c6
-rw-r--r--drivers/acpi/nfit/core.c6
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/scan.c19
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/ata/pata_atiixp.c5
-rw-r--r--drivers/ata/sata_via.c18
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/nbd.c136
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/Kconfig3
-rw-r--r--drivers/char/hw_random/amd-rng.c42
-rw-r--r--drivers/char/hw_random/geode-rng.c50
-rw-r--r--drivers/char/mem.c82
-rw-r--r--drivers/char/ppdev.c11
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/clk/clk-stm32f4.c13
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c9
-rw-r--r--drivers/clk/sunxi-ng/Kconfig3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c49
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h12
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c2
-rw-r--r--drivers/clocksource/clkevt-probe.c2
-rw-r--r--drivers/cpufreq/cpufreq.c59
-rw-r--r--drivers/cpufreq/intel_pstate.c167
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c18
-rw-r--r--drivers/cpuidle/sysfs.c12
-rw-r--r--drivers/crypto/caam/caampkc.c2
-rw-r--r--drivers/crypto/caam/ctrl.c66
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c1
-rw-r--r--drivers/crypto/ccp/ccp-dev.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev.h5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c42
-rw-r--r--drivers/dax/Kconfig1
-rw-r--r--drivers/dax/dax.c13
-rw-r--r--drivers/dma/bcm2835-dma.c5
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c5
-rw-r--r--drivers/edac/pnd2_edac.c1546
-rw-r--r--drivers/edac/pnd2_edac.h301
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-intel-int3496.c39
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/gop.c6
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c2
-rw-r--r--drivers/gpio/gpio-altera.c26
-rw-r--r--drivers/gpio/gpio-mcp23s08.c65
-rw-r--r--drivers/gpio/gpio-mockup.c7
-rw-r--r--drivers/gpio/gpio-xgene.c13
-rw-r--r--drivers/gpio/gpiolib-acpi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c10
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c109
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c32
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c82
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h7
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c44
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c113
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c45
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c56
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c28
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c83
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c5
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c11
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c24
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c60
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c5
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h11
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c29
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c14
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c79
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c31
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c7
-rw-r--r--drivers/hid/hid-corsair.c47
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/hid-xinmo.c1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c22
-rw-r--r--drivers/hid/wacom_wac.c22
-rw-r--r--drivers/hv/channel.c25
-rw-r--r--drivers/hv/channel_mgmt.c27
-rw-r--r--drivers/hv/hv_fcopy.c4
-rw-r--r--drivers/hv/hv_kvp.c4
-rw-r--r--drivers/hv/hv_snapshot.c4
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hv_utils_transport.c12
-rw-r--r--drivers/hv/hv_utils_transport.h1
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/hwmon/asus_atk0110.c3
-rw-r--r--drivers/hwmon/it87.c24
-rw-r--r--drivers/hwmon/max31790.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c4
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c34
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c4
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c10
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c6
-rw-r--r--drivers/iio/gyro/bmg160_core.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c4
-rw-r--r--drivers/iio/industrialio-core.c7
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c29
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c42
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c8
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c65
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h3
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c3
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/cm109.c4
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/alps.c72
-rw-r--r--drivers/input/mouse/alps.h11
-rw-r--r--drivers/input/mouse/elan_i2c_core.c20
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/rmi4/rmi_f30.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/tablet/hanwang.c3
-rw-r--r--drivers/input/tablet/kbtab.c3
-rw-r--r--drivers/input/touchscreen/sur40.c3
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c2
-rw-r--r--drivers/iommu/exynos-iommu.c8
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c6
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/capi/kcapi.c1
-rw-r--r--drivers/md/dm-cache-metadata.c8
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-verity-fec.c18
-rw-r--r--drivers/md/dm-verity-fec.h4
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c22
-rw-r--r--drivers/memory/omap-gpmc.c4
-rw-r--r--drivers/misc/cxl/pci.c13
-rw-r--r--drivers/misc/mei/bus-fixup.c14
-rw-r--r--drivers/misc/mei/init.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c4
-rw-r--r--drivers/mmc/core/block.c7
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sdio_bus.c12
-rw-r--r--drivers/mmc/host/dw_mmc.c11
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c1
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c14
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c30
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/ushc.c3
-rw-r--r--drivers/mtd/ubi/upd.c8
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c2
-rw-r--r--drivers/net/can/rcar/rcar_can.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c102
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c30
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c28
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c31
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c20
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c13
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c122
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c11
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c7
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/fjes/fjes_main.c78
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/irda/vlsi_ir.c8
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/mdio-boardinfo.c1
-rw-r--r--drivers/net/phy/micrel.c17
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/team/team.c19
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/cdc_ether.c15
-rw-r--r--drivers/net/usb/ch9200.c9
-rw-r--r--drivers/net/usb/cx82310_eth.c7
-rw-r--r--drivers/net/usb/kaweth.c18
-rw-r--r--drivers/net/usb/lan78xx.c9
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/usb/r8152.c45
-rw-r--r--drivers/net/usb/smsc75xx.c8
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/usb/sr9700.c9
-rw-r--r--drivers/net/usb/usbnet.c19
-rw-r--r--drivers/net/virtio_net.c45
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c6
-rw-r--r--drivers/nvdimm/bus.c6
-rw-r--r--drivers/nvdimm/claim.c10
-rw-r--r--drivers/nvdimm/dimm_devs.c77
-rw-r--r--drivers/nvme/host/core.c25
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c26
-rw-r--r--drivers/nvme/host/rdma.c30
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/core.c11
-rw-r--r--drivers/nvme/target/io-cmd.c4
-rw-r--r--drivers/nvme/target/loop.c92
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/parport/share.c6
-rw-r--r--drivers/pci/dwc/Kconfig1
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c4
-rw-r--r--drivers/pci/dwc/pcie-hisi.c6
-rw-r--r--drivers/pci/host/pci-thunder-pem.c64
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c24
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c19
-rw-r--r--drivers/pci/host/pcie-iproc.h1
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-bcm-nsp-usb3.c177
-rw-r--r--drivers/phy/phy-exynos-pcie.c4
-rw-r--r--drivers/pinctrl/core.c97
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c26
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c80
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c13
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c11
-rw-r--r--drivers/pinctrl/ti/Kconfig2
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/ptp/ptp_kvm.c5
-rw-r--r--drivers/pwm/pwm-lpss-pci.c10
-rw-r--r--drivers/pwm/pwm-lpss-platform.c1
-rw-r--r--drivers/pwm/pwm-lpss.c19
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-rockchip.c40
-rw-r--r--drivers/rapidio/devices/tsi721.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h4
-rw-r--r--drivers/remoteproc/Kconfig6
-rw-r--r--drivers/reset/core.c22
-rw-r--r--drivers/s390/crypto/pkey_api.c53
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c20
-rw-r--r--drivers/scsi/Kconfig14
-rw-r--r--drivers/scsi/aacraid/aacraid.h11
-rw-r--r--drivers/scsi/aacraid/commsup.c17
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c38
-rw-r--r--drivers/scsi/hpsa.c54
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h2
-rw-r--r--drivers/scsi/ipr.c7
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c17
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/qedf/qedf_fip.c3
-rw-r--r--drivers/scsi/qedf/qedf_main.c1
-rw-r--r--drivers/scsi/qedi/qedi_main.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c4
-rw-r--r--drivers/scsi/ufs/ufshcd.c4
-rw-r--r--drivers/staging/android/ashmem.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/target_core_alua.c136
-rw-r--r--drivers/target/target_core_configfs.c2
-rw-r--r--drivers/target/target_core_fabric_configfs.c5
-rw-r--r--drivers/target/target_core_tpg.c4
-rw-r--r--drivers/target/target_core_transport.c102
-rw-r--r--drivers/target/target_core_user.c97
-rw-r--r--drivers/thermal/cpu_cooling.c39
-rw-r--r--drivers/thermal/devfreq_cooling.c14
-rw-r--r--drivers/tty/serial/8250/8250_dw.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c25
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/st-asc.c11
-rw-r--r--drivers/tty/tty_ldisc.c7
-rw-r--r--drivers/tty/vt/keyboard.c1
-rw-r--r--drivers/usb/class/usbtmc.c18
-rw-r--r--drivers/usb/core/config.c10
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/dwc3/gadget.c21
-rw-r--r--drivers/usb/gadget/function/f_acm.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_tcm.c2
-rw-r--r--drivers/usb/gadget/function/f_uvc.c10
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c43
-rw-r--r--drivers/usb/misc/idmouse.c3
-rw-r--r--drivers/usb/misc/lvstest.c4
-rw-r--r--drivers/usb/misc/uss720.c5
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c23
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.c3
-rw-r--r--drivers/uwb/hwa-rc.c3
-rw-r--r--drivers/uwb/i1480/dfu/usb.c3
-rw-r--r--drivers/vfio/vfio.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c7
-rw-r--r--drivers/vhost/vsock.c41
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/fbdev/efifb.c66
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c15
-rw-r--r--drivers/video/fbdev/ssd1307fb.c24
-rw-r--r--drivers/video/fbdev/xen-fbfront.c4
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_balloon.c19
-rw-r--r--drivers/virtio/virtio_pci_common.c374
-rw-r--r--drivers/virtio/virtio_pci_common.h43
-rw-r--r--drivers/virtio/virtio_pci_legacy.c8
-rw-r--r--drivers/virtio/virtio_pci_modern.c8
-rw-r--r--drivers/xen/xen-acpi-processor.c36
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent_io.c49
-rw-r--r--fs/btrfs/inode.c42
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/send.c7
-rw-r--r--fs/btrfs/super.c3
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/cifs/cifsfs.c87
-rw-r--r--fs/cifs/cifsfs.h5
-rw-r--r--fs/cifs/cifsglob.h19
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c16
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/cifs/ioctl.c66
-rw-r--r--fs/cifs/smb1ops.c10
-rw-r--r--fs/cifs/smb2misc.c46
-rw-r--r--fs/cifs/smb2ops.c37
-rw-r--r--fs/cifs/smb2pdu.c23
-rw-r--r--fs/cifs/smb2proto.h7
-rw-r--r--fs/cifs/smb2transport.c55
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/crypto/crypto.c10
-rw-r--r--fs/crypto/fname.c2
-rw-r--r--fs/crypto/fscrypt_private.h4
-rw-r--r--fs/crypto/keyinfo.c52
-rw-r--r--fs/crypto/policy.c7
-rw-r--r--fs/dax.c35
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/inline.c5
-rw-r--r--fs/ext4/inode.c43
-rw-r--r--fs/ext4/move_extent.c2
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/super.c10
-rw-r--r--fs/ext4/symlink.c3
-rw-r--r--fs/ext4/xattr.c65
-rw-r--r--fs/f2fs/debug.c1
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/node.c163
-rw-r--r--fs/f2fs/segment.c6
-rw-r--r--fs/hugetlbfs/inode.c40
-rw-r--r--fs/jbd2/journal.c22
-rw-r--r--fs/jbd2/revoke.c1
-rw-r--r--fs/kernfs/file.c3
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/dir.c9
-rw-r--r--fs/nfs/filelayout/filelayout.c151
-rw-r--r--fs/nfs/filelayout/filelayout.h19
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c4
-rw-r--r--fs/nfs/nfs4proc.c9
-rw-r--r--fs/nfsd/nfs4proc.c2
-rw-r--r--fs/nfsd/nfsctl.c43
-rw-r--r--fs/nfsd/nfsproc.c1
-rw-r--r--fs/nfsd/nfssvc.c28
-rw-r--r--fs/nsfs.c1
-rw-r--r--fs/orangefs/devorangefs-req.c9
-rw-r--r--fs/orangefs/orangefs-kernel.h1
-rw-r--r--fs/orangefs/super.c32
-rw-r--r--fs/proc/proc_sysctl.c1
-rw-r--r--fs/proc/task_mmu.c9
-rw-r--r--fs/stat.c86
-rw-r--r--fs/sysfs/file.c6
-rw-r--r--fs/ubifs/debug.c10
-rw-r--r--fs/ubifs/dir.c18
-rw-r--r--fs/userfaultfd.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h3
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c63
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c35
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h2
-rw-r--r--fs/xfs/xfs_bmap_util.c10
-rw-r--r--fs/xfs/xfs_inode.c19
-rw-r--r--fs/xfs/xfs_iops.c14
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--include/asm-generic/sections.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/crypto/internal/hash.h10
-rw-r--r--include/drm/ttm/ttm_object.h5
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blkdev.h33
-rw-r--r--include/linux/ccp.h2
-rw-r--r--include/linux/cgroup.h21
-rw-r--r--include/linux/clockchips.h2
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/errqueue.h2
-rw-r--r--include/linux/fscrypt_common.h1
-rw-r--r--include/linux/gpio/consumer.h16
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/hyperv.h10
-rw-r--r--include/linux/iio/sw_device.h2
-rw-r--r--include/linux/iommu.h18
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/kasan.h3
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mfd/cros_ec.h3
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmu_notifier.h13
-rw-r--r--include/linux/nvme.h16
-rw-r--r--include/linux/omap-gpmc.h16
-rw-r--r--include/linux/pinctrl/pinctrl.h3
-rw-r--r--include/linux/reset.h31
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sched/clock.h13
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/uio.h6
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/linux/virtio_vsock.h3
-rw-r--r--include/net/af_vsock.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h30
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h6
-rw-r--r--include/net/sctp/sctp.h22
-rw-r--r--include/net/sctp/structs.h16
-rw-r--r--include/rdma/ib_verbs.h30
-rw-r--r--include/sound/cs35l35.h108
-rw-r--r--include/sound/soc.h14
-rw-r--r--include/target/target_core_base.h10
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/btrfs.h27
-rw-r--r--include/uapi/linux/ipv6_route.h2
-rw-r--r--include/uapi/linux/stat.h5
-rw-r--r--include/uapi/linux/virtio_pci.h2
-rw-r--r--include/uapi/rdma/mlx5-abi.h3
-rw-r--r--include/video/exynos5433_decon.h12
-rw-r--r--init/main.c2
-rw-r--r--kernel/audit.c622
-rw-r--r--kernel/audit.h15
-rw-r--r--kernel/auditsc.c29
-rw-r--r--kernel/bpf/core.c12
-rw-r--r--kernel/bpf/hashtab.c144
-rw-r--r--kernel/bpf/syscall.c8
-rw-r--r--kernel/bpf/verifier.c64
-rw-r--r--kernel/cgroup/cgroup.c9
-rw-r--r--kernel/irq/affinity.c20
-rw-r--r--kernel/kthread.c3
-rw-r--r--kernel/locking/lockdep_internals.h6
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/ptrace.c14
-rw-r--r--kernel/sched/clock.c46
-rw-r--r--kernel/sched/cpufreq_schedutil.c20
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/trace/ftrace.c29
-rw-r--r--kernel/trace/ring_buffer.c24
-rw-r--r--kernel/trace/trace.c9
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/iov_iter.c63
-rw-r--r--lib/syscall.c1
-rw-r--r--lib/test_kasan.c10
-rw-r--r--mm/huge_memory.c99
-rw-r--r--mm/hugetlb.c10
-rw-r--r--mm/internal.h7
-rw-r--r--mm/kasan/kasan.h5
-rw-r--r--mm/kasan/report.c36
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/mempolicy.c20
-rw-r--r--mm/migrate.c9
-rw-r--r--mm/page_alloc.c54
-rw-r--r--mm/page_vma_mapped.c15
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/swap.c27
-rw-r--r--mm/swap_cgroup.c2
-rw-r--r--mm/swap_slots.c2
-rw-r--r--mm/vmstat.c18
-rw-r--r--mm/workingset.c2
-rw-r--r--mm/z3fold.c9
-rw-r--r--mm/zsmalloc.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c11
-rw-r--r--net/batman-adv/bat_v.c14
-rw-r--r--net/batman-adv/fragmentation.c20
-rw-r--r--net/batman-adv/gateway_common.c5
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bridge/br_device.c20
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_multicast.c7
-rw-r--r--net/bridge/br_netfilter_hooks.c12
-rw-r--r--net/bridge/br_netlink.c7
-rw-r--r--net/bridge/br_private.h14
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/core/datagram.c23
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/netclassid_cgroup.c32
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/secure_seq.c31
-rw-r--r--net/core/skbuff.c46
-rw-r--r--net/core/sock.c16
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/ipv4/fib_frontend.c3
-rw-r--r--net/ipv4/ip_fragment.c25
-rw-r--r--net/ipv4/ip_sockglue.c10
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipmr.c11
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c20
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/ping.c5
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_input.c63
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv4/tcp_recovery.c3
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/exthdrs.c1
-rw-r--r--net/ipv6/ip6_input.c7
-rw-r--r--net/ipv6/ip6mr.c13
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/seg6.c3
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/kcm/kcmsock.c6
-rw-r--r--net/key/af_key.c93
-rw-r--r--net/l2tp/l2tp_core.c160
-rw-r--r--net/l2tp/l2tp_core.h9
-rw-r--r--net/l2tp/l2tp_debugfs.c10
-rw-r--r--net/l2tp/l2tp_eth.c10
-rw-r--r--net/l2tp/l2tp_ip.c22
-rw-r--r--net/l2tp/l2tp_ip6.c23
-rw-r--r--net/l2tp/l2tp_netlink.c52
-rw-r--r--net/l2tp/l2tp_ppp.c103
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/rx.c86
-rw-r--r--net/mpls/af_mpls.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_conntrack_ecache.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_extend.c13
-rw-r--r--net/netfilter/nf_conntrack_helper.c17
-rw-r--r--net/netfilter/nf_conntrack_netlink.c42
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c13
-rw-r--r--net/netfilter/nf_nat_redirect.c2
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_cthelper.c287
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/netfilter/nft_ct.c21
-rw-r--r--net/netfilter/nft_hash.c10
-rw-r--r--net/netfilter/nft_meta.c40
-rw-r--r--net/netfilter/nft_nat.c8
-rw-r--r--net/netfilter/nft_set_bitmap.c165
-rw-r--r--net/netfilter/xt_TCPMSS.c6
-rw-r--r--net/netfilter/xt_TPROXY.c5
-rw-r--r--net/netlink/af_netlink.c41
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/flow.c10
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/qrtr/qrtr.c4
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/sched/act_api.c55
-rw-r--r--net/sched/sch_dsmark.c10
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sctp/associola.c19
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/output.c76
-rw-r--r--net/sctp/outqueue.c14
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/sctp/socket.c21
-rw-r--r--net/sctp/stream.c43
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/socket.c13
-rw-r--r--net/sunrpc/svcsock.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/tipc/subscr.c7
-rw-r--r--net/unix/garbage.c17
-rw-r--r--net/vmw_vsock/af_vsock.c14
-rw-r--r--net/vmw_vsock/virtio_transport.c42
-rw-r--r--net/vmw_vsock/virtio_transport_common.c7
-rw-r--r--net/wireless/nl80211.c127
-rw-r--r--net/wireless/sysfs.c10
-rw-r--r--net/xfrm/xfrm_user.c9
-rw-r--r--samples/statx/test-statx.c12
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/kconfig/gconf.c2
-rw-r--r--security/keys/gc.c2
-rw-r--r--security/keys/keyctl.c20
-rw-r--r--security/keys/process_keys.c44
-rw-r--r--sound/hda/hdac_controller.c2
-rw-r--r--sound/soc/Kconfig2
-rw-r--r--sound/soc/Makefile2
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1373.c2
-rw-r--r--sound/soc/blackfin/bfin-eval-adav80x.c2
-rw-r--r--sound/soc/codecs/Kconfig30
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ak4613.c10
-rw-r--r--sound/soc/codecs/cs35l35.c1580
-rw-r--r--sound/soc/codecs/cs35l35.h294
-rw-r--r--sound/soc/codecs/cs4271.c2
-rw-r--r--sound/soc/codecs/cs53l30.c1
-rw-r--r--sound/soc/codecs/da7213.c13
-rw-r--r--sound/soc/codecs/dio2125.c120
-rw-r--r--sound/soc/codecs/es7134.c116
-rw-r--r--sound/soc/codecs/es8328.c51
-rw-r--r--sound/soc/codecs/hdac_hdmi.c4
-rw-r--r--sound/soc/codecs/max9867.c4
-rw-r--r--sound/soc/codecs/max98927.c841
-rw-r--r--sound/soc/codecs/max98927.h272
-rw-r--r--sound/soc/codecs/nau8540.c1224
-rw-r--r--sound/soc/codecs/nau8540.h310
-rw-r--r--sound/soc/codecs/nau8824.c1831
-rw-r--r--sound/soc/codecs/nau8824.h466
-rw-r--r--sound/soc/codecs/rt5514.c36
-rw-r--r--sound/soc/codecs/rt5645.c10
-rw-r--r--sound/soc/codecs/rt5665.c222
-rw-r--r--sound/soc/codecs/rt5665.h2
-rw-r--r--sound/soc/codecs/rt5670.c21
-rw-r--r--sound/soc/codecs/rt5677.c7
-rw-r--r--sound/soc/codecs/sgtl5000.c19
-rw-r--r--sound/soc/codecs/ssm4567.c9
-rw-r--r--sound/soc/codecs/sta529.c7
-rw-r--r--sound/soc/codecs/tas2552.c6
-rw-r--r--sound/soc/codecs/tlv320aic23.c7
-rw-r--r--sound/soc/codecs/twl6040.c8
-rw-r--r--sound/soc/codecs/uda1380.c7
-rw-r--r--sound/soc/codecs/wm5100.c2
-rw-r--r--sound/soc/codecs/wm8903.c31
-rw-r--r--sound/soc/codecs/wm8960.c195
-rw-r--r--sound/soc/codecs/wm8978.c7
-rw-r--r--sound/soc/codecs/wm_adsp.c324
-rw-r--r--sound/soc/codecs/wm_adsp.h24
-rw-r--r--sound/soc/dwc/Kconfig4
-rw-r--r--sound/soc/dwc/Makefile6
-rw-r--r--sound/soc/dwc/dwc-i2s.c (renamed from sound/soc/dwc/designware_i2s.c)0
-rw-r--r--sound/soc/dwc/dwc-pcm.c (renamed from sound/soc/dwc/designware_pcm.c)3
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c2
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c5
-rw-r--r--sound/soc/fsl/fsl_ssi.c27
-rw-r--r--sound/soc/fsl/imx-mc13783.c2
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c28
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c2
-rw-r--r--sound/soc/fsl/imx-wm8962.c72
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c2
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c2
-rw-r--r--sound/soc/fsl/p1022_ds.c2
-rw-r--r--sound/soc/fsl/p1022_rdk.c2
-rw-r--r--sound/soc/fsl/phycore-ac97.c2
-rw-r--r--sound/soc/fsl/wm1133-ev1.c2
-rw-r--r--sound/soc/generic/simple-card.c43
-rw-r--r--sound/soc/generic/simple-scu-card.c37
-rw-r--r--sound/soc/hisilicon/Kconfig5
-rw-r--r--sound/soc/hisilicon/Makefile1
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c618
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.h276
-rw-r--r--sound/soc/intel/Kconfig24
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c41
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c4
-rw-r--r--sound/soc/intel/boards/Makefile4
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c5
-rw-r--r--sound/soc/intel/boards/broadwell.c3
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c97
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c3
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c283
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c208
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c109
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c6
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c118
-rw-r--r--sound/soc/intel/skylake/skl-messages.c16
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c7
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c118
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c26
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.h2
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h40
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c76
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h17
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c140
-rw-r--r--sound/soc/intel/skylake/skl-sst.c175
-rw-r--r--sound/soc/intel/skylake/skl-topology.c247
-rw-r--r--sound/soc/intel/skylake/skl-topology.h17
-rw-r--r--sound/soc/intel/skylake/skl.c2
-rw-r--r--sound/soc/intel/skylake/skl.h1
-rw-r--r--sound/soc/mediatek/Kconfig10
-rw-r--r--sound/soc/mediatek/mt2701/Makefile1
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c16
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-wm8960.c176
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c2
-rw-r--r--sound/soc/omap/am3517evm.c2
-rw-r--r--sound/soc/omap/n810.c2
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c2
-rw-r--r--sound/soc/omap/omap-twl4030.c2
-rw-r--r--sound/soc/omap/omap3pandora.c2
-rw-r--r--sound/soc/omap/osk5912.c2
-rw-r--r--sound/soc/omap/rx51.c7
-rw-r--r--sound/soc/pxa/brownstone.c2
-rw-r--r--sound/soc/pxa/corgi.c2
-rw-r--r--sound/soc/pxa/e750_wm9705.c2
-rw-r--r--sound/soc/pxa/e800_wm9712.c2
-rw-r--r--sound/soc/pxa/em-x270.c2
-rw-r--r--sound/soc/pxa/hx4700.c2
-rw-r--r--sound/soc/pxa/imote2.c2
-rw-r--r--sound/soc/pxa/magician.c4
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c2
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/poodle.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c15
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c5
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c8
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c2
-rw-r--r--sound/soc/pxa/raumfeld.c8
-rw-r--r--sound/soc/pxa/spitz.c6
-rw-r--r--sound/soc/pxa/tosa.c4
-rw-r--r--sound/soc/pxa/z2.c6
-rw-r--r--sound/soc/pxa/zylonite.c2
-rw-r--r--sound/soc/qcom/lpass-apq8016.c12
-rw-r--r--sound/soc/qcom/lpass-cpu.c22
-rw-r--r--sound/soc/qcom/lpass-ipq806x.c6
-rw-r--r--sound/soc/qcom/lpass.h2
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c3
-rw-r--r--sound/soc/samsung/Kconfig8
-rw-r--r--sound/soc/samsung/Makefile2
-rw-r--r--sound/soc/samsung/bells.c1
-rw-r--r--sound/soc/samsung/i2s-regs.h2
-rw-r--r--sound/soc/samsung/i2s.c1
-rw-r--r--sound/soc/samsung/odroid.c219
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c1
-rw-r--r--sound/soc/sh/rcar/adg.c75
-rw-r--r--sound/soc/sh/rcar/core.c111
-rw-r--r--sound/soc/sh/rcar/dvc.c24
-rw-r--r--sound/soc/sh/rcar/rsnd.h55
-rw-r--r--sound/soc/sh/rcar/src.c3
-rw-r--r--sound/soc/sh/rcar/ssi.c9
-rw-r--r--sound/soc/sirf/sirf-audio-port.c1
-rw-r--r--sound/soc/sirf/sirf-audio.c1
-rw-r--r--sound/soc/sirf/sirf-usp.c3
-rw-r--r--sound/soc/soc-core.c27
-rw-r--r--sound/soc/soc-jack.c48
-rw-r--r--sound/soc/soc-topology.c5
-rw-r--r--sound/soc/sti/uniperif_player.c2
-rw-r--r--sound/soc/stm/Kconfig8
-rw-r--r--sound/soc/stm/Makefile6
-rw-r--r--sound/soc/stm/stm32_sai.c115
-rw-r--r--sound/soc/stm/stm32_sai.h200
-rw-r--r--sound/soc/stm/stm32_sai_sub.c884
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c168
-rw-r--r--sound/soc/sunxi/sun8i-codec.c10
-rw-r--r--sound/soc/tegra/tegra20_ac97.c1
-rw-r--r--sound/soc/tegra/tegra20_das.c2
-rw-r--r--sound/soc/tegra/tegra20_i2s.c1
-rw-r--r--sound/soc/tegra/tegra20_spdif.c5
-rw-r--r--sound/soc/tegra/tegra30_ahub.c5
-rw-r--r--sound/soc/tegra/tegra30_i2s.c1
-rw-r--r--sound/soc/tegra/tegra_alc5632.c4
-rw-r--r--sound/soc/tegra/tegra_max98090.c4
-rw-r--r--sound/soc/tegra/tegra_rt5640.c4
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c4
-rw-r--r--sound/soc/tegra/tegra_wm8753.c4
-rw-r--r--sound/soc/tegra/tegra_wm8903.c4
-rw-r--r--sound/soc/tegra/tegra_wm9712.c4
-rw-r--r--sound/soc/tegra/trimslice.c4
-rw-r--r--sound/soc/txx9/txx9aclc.c5
-rw-r--r--sound/soc/ux500/mop500.c4
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c4
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c1
-rw-r--r--sound/soc/zte/Kconfig8
-rw-r--r--sound/soc/zte/Makefile1
-rw-r--r--sound/soc/zte/zx-tdm.c461
-rw-r--r--tools/include/linux/filter.h10
-rw-r--r--tools/perf/util/annotate.c6
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c1
-rw-r--r--tools/power/x86/turbostat/turbostat.82
-rw-r--r--tools/power/x86/turbostat/turbostat.c26
-rw-r--r--tools/testing/selftests/bpf/Makefile26
-rw-r--r--tools/testing/selftests/bpf/test_maps.c33
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c290
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc117
-rw-r--r--tools/testing/selftests/net/psock_fanout.c22
-rw-r--r--tools/testing/selftests/net/psock_lib.h13
-rw-r--r--tools/testing/selftests/powerpc/Makefile10
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c19
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c20
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c23
-rw-r--r--virt/kvm/arm/vgic/vgic.h11
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c44
1233 files changed, 26137 insertions, 8842 deletions
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..1d6f4e7280dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
99Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> 99Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
100Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> 100Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
101Mark Brown <broonie@sirena.org.uk> 101Mark Brown <broonie@sirena.org.uk>
102Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
103Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
102Matthieu CASTET <castet.matthieu@free.fr> 104Matthieu CASTET <castet.matthieu@free.fr>
103Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br> 105Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
104Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> 106Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
171Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com> 173Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
172Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com> 174Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
173Takashi YOSHII <takashi.yoshii.zj@renesas.com> 175Takashi YOSHII <takashi.yoshii.zj@renesas.com>
176Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
174Yusuke Goda <goda.yusuke@renesas.com> 177Yusuke Goda <goda.yusuke@renesas.com>
175Gustavo Padovan <gustavo@las.ic.unicamp.br> 178Gustavo Padovan <gustavo@las.ic.unicamp.br>
176Gustavo Padovan <padovan@profusion.mobi> 179Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2ba45caabada..facc20a3f962 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1725,6 +1725,12 @@
1725 kernel and module base offset ASLR (Address Space 1725 kernel and module base offset ASLR (Address Space
1726 Layout Randomization). 1726 Layout Randomization).
1727 1727
1728 kasan_multi_shot
1729 [KNL] Enforce KASAN (Kernel Address Sanitizer) to print
1730 report on every invalid memory access. Without this
1731 parameter KASAN will print report only for the first
1732 invalid access.
1733
1728 keepinitrd [HW,ARM] 1734 keepinitrd [HW,ARM]
1729 1735
1730 kernelcore= [KNL,X86,IA-64,PPC] 1736 kernelcore= [KNL,X86,IA-64,PPC]
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 30c546900b60..07dbb358182c 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -45,7 +45,7 @@ The following clocks are available:
45 - 1 15 SATA 45 - 1 15 SATA
46 - 1 16 SATA USB 46 - 1 16 SATA USB
47 - 1 17 Main 47 - 1 17 Main
48 - 1 18 SD/MMC 48 - 1 18 SD/MMC/GOP
49 - 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART) 49 - 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART)
50 - 1 22 USB3H0 50 - 1 22 USB3H0
51 - 1 23 USB3H1 51 - 1 23 USB3H1
@@ -65,7 +65,7 @@ Required properties:
65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", 65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", 66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", 67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", 68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; 69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
70 70
71Example: 71Example:
@@ -78,6 +78,6 @@ Example:
78 gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", 78 gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
79 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", 79 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
80 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", 80 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
81 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", 81 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
82 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; 82 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
83 }; 83 };
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a78265993665..ca5204b3bc21 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -4,7 +4,6 @@ Required properties:
4 - compatible: value should be one of the following 4 - compatible: value should be one of the following
5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */ 5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ 6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
7 "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
8 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ 7 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
9 "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ 8 "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
10 "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ 9 "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0228b0..5837402c3ade 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -11,7 +11,6 @@ Required properties:
11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ 11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ 12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ 13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
14 "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
15 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ 14 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
16 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ 15 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
17 16
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9607f6..520d61dad6dd 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,7 +13,7 @@ Required Properties:
13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following, 13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
14 before RK3288 14 before RK3288
15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
16 - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108 16 - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036 17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368 18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399 19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
index b7fa3b97986d..a339dbb15493 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
44 }; 44 };
45 45
46HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description. 46HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
47
48Some BIOSes place the host controller in a mode where it is ECAM
49compliant for all devices other than the root complex. In such cases,
50the host controller should be described as below.
51
47The properties and their meanings are identical to those described in 52The properties and their meanings are identical to those described in
48host-generic-pci.txt except as listed below. 53host-generic-pci.txt except as listed below.
49 54
50Properties of the host controller node that differ from 55Properties of the host controller node that differ from
51host-generic-pci.txt: 56host-generic-pci.txt:
52 57
53- compatible : Must be "hisilicon,pcie-almost-ecam" 58- compatible : Must be "hisilicon,hip06-pcie-ecam", or
59 "hisilicon,hip07-pcie-ecam"
54 60
55- reg : Two entries: First the ECAM configuration space for any 61- reg : Two entries: First the ECAM configuration space for any
56 other bus underneath the root bus. Second, the base 62 other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
59 65
60Example: 66Example:
61 pcie0: pcie@a0090000 { 67 pcie0: pcie@a0090000 {
62 compatible = "hisilicon,pcie-almost-ecam"; 68 compatible = "hisilicon,hip06-pcie-ecam";
63 reg = <0 0xb0000000 0 0x2000000>, /* ECAM configuration space */ 69 reg = <0 0xb0000000 0 0x2000000>, /* ECAM configuration space */
64 <0 0xa0090000 0 0x10000>; /* host bridge registers */ 70 <0 0xa0090000 0 0x10000>; /* host bridge registers */
65 bus-range = <0 31>; 71 bus-range = <0 31>;
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644
index e68ae5dec9c9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
+++ /dev/null
@@ -1,39 +0,0 @@
1Broadcom USB3 phy binding for northstar plus SoC
2The USB3 phy is internal to the SoC and is accessed using mdio interface.
3
4Required mdio bus properties:
5- reg: Should be 0x0 for SoC internal USB3 phy
6- #address-cells: must be 1
7- #size-cells: must be 0
8
9Required USB3 PHY properties:
10- compatible: should be "brcm,nsp-usb3-phy"
11- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
12- usb3-ctrl-syscon: handler of syscon node defining physical address
13 of usb3 control register.
14- #phy-cells: must be 0
15
16Required usb3 control properties:
17- compatible: should be "brcm,nsp-usb3-ctrl"
18- reg: offset and length of the control registers
19
20Example:
21
22 mdio@0 {
23 reg = <0x0>;
24 #address-cells = <1>;
25 #size-cells = <0>;
26
27 usb3_phy: usb-phy@10 {
28 compatible = "brcm,nsp-usb3-phy";
29 reg = <0x10>;
30 usb3-ctrl-syscon = <&usb3_ctrl>;
31 #phy-cells = <0>;
32 status = "disabled";
33 };
34 };
35
36 usb3_ctrl: syscon@104408 {
37 compatible = "brcm,nsp-usb3-ctrl", "syscon";
38 reg = <0x104408 0x3fc>;
39 };
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 471477299ece..9cf7876ab434 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -12,7 +12,8 @@ Required properties:
12- reg : Offset and length of the register set for the module 12- reg : Offset and length of the register set for the module
13- interrupts : the interrupt number for the RNG module. 13- interrupts : the interrupt number for the RNG module.
14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" 14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
15- clocks: the trng clock source 15- clocks: the trng clock source. Only mandatory for the
16 "inside-secure,safexcel-eip76" compatible.
16 17
17Example: 18Example:
18/* AM335x */ 19/* AM335x */
diff --git a/Documentation/devicetree/bindings/sound/cs35l35.txt b/Documentation/devicetree/bindings/sound/cs35l35.txt
new file mode 100644
index 000000000000..016b768bc722
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l35.txt
@@ -0,0 +1,180 @@
1CS35L35 Boosted Speaker Amplifier
2
3Required properties:
4
5 - compatible : "cirrus,cs35l35"
6
7 - reg : the I2C address of the device for I2C
8
9 - VA-supply, VP-supply : power supplies for the device,
10 as covered in
11 Documentation/devicetree/bindings/regulator/regulator.txt.
12
13 - interrupt-parent : Specifies the phandle of the interrupt controller to
14 which the IRQs from CS35L35 are delivered to.
15- interrupts : IRQ line info for the CS35L35.
16 (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
17 for further information relating to interrupt properties)
18
19Optional properties:
20 - reset-gpios : gpio used to reset the amplifier
21
22 - cirrus,stereo-config : Boolean to determine if there are 2 AMPs for a
23 Stereo configuration
24
25 - cirrus,audio-channel : Set Location of Audio Signal on Serial Port
26 0 = Data Packet received on Left I2S Channel
27 1 = Data Packet received on Right I2S Channel
28
29 - cirrus,advisory-channel : Set Location of Advisory Signal on Serial Port
30 0 = Data Packet received on Left I2S Channel
31 1 = Data Packet received on Right I2S Channel
32
33 - cirrus,shared-boost : Boolean to enable ClassH tracking of Advisory Signal
34 if 2 Devices share Boost BST_CTL
35
36 - cirrus,external-boost : Boolean to specify the device is using an external
37 boost supply, note that sharing a boost from another cs35l35 would constitute
38 using an external supply for the slave device
39
40 - cirrus,sp-drv-strength : Value for setting the Serial Port drive strength
41 Table 3-10 of the datasheet lists drive-strength specifications
42 0 = 1x (Default)
43 1 = .5x
44 - cirrus,sp-drv-unused : Determines how unused slots should be driven on the
45 Serial Port.
46 0 - Hi-Z
47 2 - Drive 0's (Default)
48 3 - Drive 1's
49
50 - cirrus,bst-pdn-fet-on : Boolean to determine if the Boost PDN control
51 powers down with a rectification FET On or Off. If VSPK is supplied
52 externally then FET is off.
53
54 - cirrus,boost-ctl-millivolt : Boost Voltage Value. Configures the boost
55 converter's output voltage in mV. The range is from 2600mV to 9000mV with
56 increments of 100mV.
57 (Default) VP
58
59 - cirrus,boost-peak-milliamp : Boost-converter peak current limit in mA.
60 Configures the peak current by monitoring the current through the boost FET.
61 Range starts at 1680mA and goes to a maximum of 4480mA with increments of
62 110mA.
63 (Default) 2.46 Amps
64
65 - cirrus,amp-gain-zc : Boolean to determine if to use Amplifier gain-change
66 zero-cross
67
68Optional H/G Algorithm sub-node:
69
70 The cs35l35 node can have a single "cirrus,classh-internal-algo" sub-node
71 that will disable automatic control of the internal H/G Algorithm.
72
73 It is strongly recommended that the Datasheet be referenced when adjusting
74 or using these Class H Algorithm controls over the internal Algorithm.
75 Serious damage can occur to the Device and surrounding components.
76
77 - cirrus,classh-internal-algo : Sub-node for the Internal Class H Algorithm
78 See Section 4.3 Internal Class H Algorithm in the Datasheet.
79 If not used, the device manages the ClassH Algorithm internally.
80
81Optional properties for the "cirrus,classh-internal-algo" Sub-node
82
83 Section 7.29 Class H Control
84 - cirrus,classh-bst-overide : Boolean
85 - cirrus,classh-bst-max-limit
86 - cirrus,classh-mem-depth
87
88 Section 7.30 Class H Headroom Control
89 - cirrus,classh-headroom
90
91 Section 7.31 Class H Release Rate
92 - cirrus,classh-release-rate
93
94 Section 7.32 Class H Weak FET Drive Control
95 - cirrus,classh-wk-fet-disable
96 - cirrus,classh-wk-fet-delay
97 - cirrus,classh-wk-fet-thld
98
99 Section 7.34 Class H VP Control
100 - cirrus,classh-vpch-auto
101 - cirrus,classh-vpch-rate
102 - cirrus,classh-vpch-man
103
104Optional Monitor Signal Format sub-node:
105
106 The cs35l35 node can have a single "cirrus,monitor-signal-format" sub-node
107 for adjusting the Depth, Location and Frame of the Monitoring Signals
108 for Algorithms.
109
110 See Sections 4.8.2 through 4.8.4 Serial-Port Control in the Datasheet
111
112 - cirrus,monitor-signal-format : Sub-node for the Monitor Signaling Formatting
113 on the I2S Port. Each of the 3 8 bit values in the array contain the settings
114 for depth, location, and frame.
115
116 If not used, the defaults for the 6 monitor signals is used.
117
118 Sections 7.44 - 7.53 lists values for the depth, location, and frame
119 for each monitoring signal.
120
121 - cirrus,imon : 4 8 bit values to set the depth, location, frame and ADC
122 scale of the IMON monitor signal.
123
124 - cirrus,vmon : 3 8 bit values to set the depth, location, and frame
125 of the VMON monitor signal.
126
127 - cirrus,vpmon : 3 8 bit values to set the depth, location, and frame
128 of the VPMON monitor signal.
129
130 - cirrus,vbstmon : 3 8 bit values to set the depth, location, and frame
131 of the VBSTMON monitor signal
132
133 - cirrus,vpbrstat : 3 8 bit values to set the depth, location, and frame
134 of the VPBRSTAT monitor signal
135
136 - cirrus,zerofill : 3 8 bit values to set the depth, location, and frame
137 of the ZEROFILL packet in the monitor signal
138
139Example:
140
141cs35l35: cs35l35@20 {
142 compatible = "cirrus,cs35l35";
143 reg = <0x20>;
144 VA-supply = <&dummy_vreg>;
145 VP-supply = <&dummy_vreg>;
146 reset-gpios = <&axi_gpio 54 0>;
147 interrupt-parent = <&gpio8>;
148 interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
149 cirrus,boost-ctl-millivolt = <9000>;
150
151 cirrus,stereo-config;
152 cirrus,audio-channel = <0x00>;
153 cirrus,advisory-channel = <0x01>;
154 cirrus,shared-boost;
155
156 cirrus,classh-internal-algo {
157 cirrus,classh-bst-overide;
158 cirrus,classh-bst-max-limit = <0x01>;
159 cirrus,classh-mem-depth = <0x01>;
160 cirrus,classh-release-rate = <0x08>;
161 cirrus,classh-headroom-millivolt = <0x0B>;
162 cirrus,classh-wk-fet-disable = <0x01>;
163 cirrus,classh-wk-fet-delay = <0x04>;
164 cirrus,classh-wk-fet-thld = <0x01>;
165 cirrus,classh-vpch-auto = <0x01>;
166 cirrus,classh-vpch-rate = <0x02>;
167 cirrus,classh-vpch-man = <0x05>;
168 };
169
170 /* Depth, Location, Frame */
171 cirrus,monitor-signal-format {
172 cirrus,imon = /bits/ 8 <0x03 0x00 0x01>;
173 cirrus,vmon = /bits/ 8 <0x03 0x00 0x00>;
174 cirrus,vpmon = /bits/ 8 <0x03 0x04 0x00>;
175 cirrus,vbstmon = /bits/ 8 <0x03 0x04 0x01>;
176 cirrus,vpbrstat = /bits/ 8 <0x00 0x04 0x00>;
177 cirrus,zerofill = /bits/ 8 <0x00 0x00 0x00>;
178 };
179
180};
diff --git a/Documentation/devicetree/bindings/sound/dioo,dio2125.txt b/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
new file mode 100644
index 000000000000..63dbfe0f11d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
@@ -0,0 +1,12 @@
1DIO2125 Audio Driver
2
3Required properties:
4- compatible : "dioo,dio2125"
5- enable-gpios : the gpio connected to the enable pin of the dio2125
6
7Example:
8
9amp: analog-amplifier {
10 compatible = "dioo,dio2125";
11 enable-gpios = <&gpio GPIOH_3 0>;
12};
diff --git a/Documentation/devicetree/bindings/sound/everest,es7134.txt b/Documentation/devicetree/bindings/sound/everest,es7134.txt
new file mode 100644
index 000000000000..5495a3cb8b7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/everest,es7134.txt
@@ -0,0 +1,10 @@
1ES7134 i2s DA converter
2
3Required properties:
4- compatible : "everest,es7134" or "everest,es7144"
5
6Example:
7
8i2s_codec: external-codec {
9 compatible = "everest,es7134";
10};
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index 5b76be45d18b..d415888e1316 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -20,24 +20,8 @@ Required properties:
20 have. 20 have.
21- interrupt-parent: The phandle for the interrupt controller that 21- interrupt-parent: The phandle for the interrupt controller that
22 services interrupts for this device. 22 services interrupts for this device.
23- fsl,playback-dma: Phandle to a node for the DMA channel to use for
24 playback of audio. This is typically dictated by SOC
25 design. See the notes below.
26- fsl,capture-dma: Phandle to a node for the DMA channel to use for
27 capture (recording) of audio. This is typically dictated
28 by SOC design. See the notes below.
29- fsl,fifo-depth: The number of elements in the transmit and receive FIFOs. 23- fsl,fifo-depth: The number of elements in the transmit and receive FIFOs.
30 This number is the maximum allowed value for SFCSR[TFWM0]. 24 This number is the maximum allowed value for SFCSR[TFWM0].
31- fsl,ssi-asynchronous:
32 If specified, the SSI is to be programmed in asynchronous
33 mode. In this mode, pins SRCK, STCK, SRFS, and STFS must
34 all be connected to valid signals. In synchronous mode,
35 SRCK and SRFS are ignored. Asynchronous mode allows
36 playback and capture to use different sample sizes and
37 sample rates. Some drivers may require that SRCK and STCK
38 be connected together, and SRFS and STFS be connected
39 together. This would still allow different sample sizes,
40 but not different sample rates.
41 - clocks: "ipg" - Required clock for the SSI unit 25 - clocks: "ipg" - Required clock for the SSI unit
42 "baud" - Required clock for SSI master mode. Otherwise this 26 "baud" - Required clock for SSI master mode. Otherwise this
43 clock is not used 27 clock is not used
@@ -61,6 +45,24 @@ Optional properties:
61- fsl,mode: The operating mode for the AC97 interface only. 45- fsl,mode: The operating mode for the AC97 interface only.
62 "ac97-slave" - AC97 mode, SSI is clock slave 46 "ac97-slave" - AC97 mode, SSI is clock slave
63 "ac97-master" - AC97 mode, SSI is clock master 47 "ac97-master" - AC97 mode, SSI is clock master
48- fsl,ssi-asynchronous:
49 If specified, the SSI is to be programmed in asynchronous
50 mode. In this mode, pins SRCK, STCK, SRFS, and STFS must
51 all be connected to valid signals. In synchronous mode,
52 SRCK and SRFS are ignored. Asynchronous mode allows
53 playback and capture to use different sample sizes and
54 sample rates. Some drivers may require that SRCK and STCK
55 be connected together, and SRFS and STFS be connected
56 together. This would still allow different sample sizes,
57 but not different sample rates.
58- fsl,playback-dma: Phandle to a node for the DMA channel to use for
59 playback of audio. This is typically dictated by SOC
60 design. See the notes below.
61 Only used on Power Architecture.
62- fsl,capture-dma: Phandle to a node for the DMA channel to use for
63 capture (recording) of audio. This is typically dictated
64 by SOC design. See the notes below.
65 Only used on Power Architecture.
64 66
65Child 'codec' node required properties: 67Child 'codec' node required properties:
66- compatible: Compatible list, contains the name of the codec 68- compatible: Compatible list, contains the name of the codec
diff --git a/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt b/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt
new file mode 100644
index 000000000000..7a296784eb37
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/hisilicon,hi6210-i2s.txt
@@ -0,0 +1,42 @@
1* Hisilicon 6210 i2s controller
2
3Required properties:
4
5- compatible: should be one of the following:
6 - "hisilicon,hi6210-i2s"
7- reg: physical base address of the i2s controller unit and length of
8 memory mapped region.
9- interrupts: should contain the i2s interrupt.
10- clocks: a list of phandle + clock-specifier pairs, one for each entry
11 in clock-names.
12- clock-names: should contain following:
13 - "dacodec"
14 - "i2s-base"
15- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
16 Documentation/devicetree/bindings/dma/dma.txt
17- dma-names: should be "tx" and "rx"
18- hisilicon,sysctrl-syscon: phandle to sysctrl syscon
19- #sound-dai-cells: Should be set to 1 (for multi-dai)
20 - The dai cell indexes reference the following interfaces:
21 0: S2 interface
22 (Currently that is the only one available, but more may be
23 supported in the future)
24
25Example for the hi6210 i2s controller:
26
27i2s0: i2s@f7118000{
28 compatible = "hisilicon,hi6210-i2s";
29 reg = <0x0 0xf7118000 0x0 0x8000>; /* i2s unit */
30 interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* 155 "DigACodec_intr"-32 */
31 clocks = <&sys_ctrl HI6220_DACODEC_PCLK>,
32 <&sys_ctrl HI6220_BBPPLL0_DIV>;
33 clock-names = "dacodec", "i2s-base";
34 dmas = <&dma0 15 &dma0 14>;
35 dma-names = "rx", "tx";
36 hisilicon,sysctrl-syscon = <&sys_ctrl>;
37 #sound-dai-cells = <1>;
38};
39
40Then when referencing the i2s controller:
41 sound-dai = <&i2s0 0>; /* index 0 => S2 interface */
42
diff --git a/Documentation/devicetree/bindings/sound/max98925.txt b/Documentation/devicetree/bindings/sound/max98925.txt
deleted file mode 100644
index 27be63e2aa0d..000000000000
--- a/Documentation/devicetree/bindings/sound/max98925.txt
+++ /dev/null
@@ -1,22 +0,0 @@
1max98925 audio CODEC
2
3This device supports I2C.
4
5Required properties:
6
7 - compatible : "maxim,max98925"
8
9 - vmon-slot-no : slot number used to send voltage information
10
11 - imon-slot-no : slot number used to send current information
12
13 - reg : the I2C address of the device for I2C
14
15Example:
16
17codec: max98925@1a {
18 compatible = "maxim,max98925";
19 vmon-slot-no = <0>;
20 imon-slot-no = <2>;
21 reg = <0x1a>;
22};
diff --git a/Documentation/devicetree/bindings/sound/max98926.txt b/Documentation/devicetree/bindings/sound/max98926.txt
deleted file mode 100644
index 0b7f4e4d5f9a..000000000000
--- a/Documentation/devicetree/bindings/sound/max98926.txt
+++ /dev/null
@@ -1,32 +0,0 @@
1max98926 audio CODEC
2
3This device supports I2C.
4
5Required properties:
6
7 - compatible : "maxim,max98926"
8
9 - vmon-slot-no : slot number used to send voltage information
10 or in inteleave mode this will be used as
11 interleave slot.
12
13 - imon-slot-no : slot number used to send current information
14
15 - interleave-mode : When using two MAX98926 in a system it is
16 possible to create ADC data that that will
17 overflow the frame size. Digital Audio Interleave
18 mode provides a means to output VMON and IMON data
19 from two devices on a single DOUT line when running
20 smaller frames sizes such as 32 BCLKS per LRCLK or
21 48 BCLKS per LRCLK.
22
23 - reg : the I2C address of the device for I2C
24
25Example:
26
27codec: max98926@1a {
28 compatible = "maxim,max98926";
29 vmon-slot-no = <0>;
30 imon-slot-no = <2>;
31 reg = <0x1a>;
32};
diff --git a/Documentation/devicetree/bindings/sound/max9892x.txt b/Documentation/devicetree/bindings/sound/max9892x.txt
new file mode 100644
index 000000000000..f6171591ddc6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max9892x.txt
@@ -0,0 +1,41 @@
1Maxim Integrated MAX98925/MAX98926/MAX98927 Speaker Amplifier
2
3This device supports I2C.
4
5Required properties:
6
7 - compatible : should be one of the following
8 - "maxim,max98925"
9 - "maxim,max98926"
10 - "maxim,max98927"
11
12 - vmon-slot-no : slot number used to send voltage information
13 or in interleave mode this will be used as
14 interleave slot.
15 MAX98925/MAX98926 slot range : 0 ~ 30, Default : 0
16 MAX98927 slot range : 0 ~ 15, Default : 0
17
18 - imon-slot-no : slot number used to send current information
19 MAX98925/MAX98926 slot range : 0 ~ 30, Default : 0
20 MAX98927 slot range : 0 ~ 15, Default : 0
21
22 - interleave-mode : When using two MAX9892X in a system it is
23 possible to create ADC data that will
24 overflow the frame size. Digital Audio Interleave
25 mode provides a means to output VMON and IMON data
26 from two devices on a single DOUT line when running
27 smaller frames sizes such as 32 BCLKS per LRCLK or
28 48 BCLKS per LRCLK.
29 Range : 0 (off), 1 (on), Default : 0
30
31 - reg : the I2C address of the device for I2C
32
33Example:
34
35codec: max98927@3a {
36 compatible = "maxim,max98927";
37 vmon-slot-no = <0>;
38 imon-slot-no = <1>;
39 interleave-mode = <0>;
40 reg = <0x3a>;
41};
diff --git a/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt b/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt
new file mode 100644
index 000000000000..809b609ea9d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt
@@ -0,0 +1,24 @@
1MT2701 with WM8960 CODEC
2
3Required properties:
4- compatible: "mediatek,mt2701-wm8960-machine"
5- mediatek,platform: the phandle of MT2701 ASoC platform
6- audio-routing: a list of the connections between audio components
7- mediatek,audio-codec: the phandles of wm8960 codec
8- pinctrl-names: Should contain only one value - "default"
9- pinctrl-0: Should specify pin control groups used for this controller.
10
11Example:
12
13 sound:sound {
14 compatible = "mediatek,mt2701-wm8960-machine";
15 mediatek,platform = <&afe>;
16 audio-routing =
17 "Headphone", "HP_L",
18 "Headphone", "HP_R",
19 "LINPUT1", "AMIC",
20 "RINPUT1", "AMIC";
21 mediatek,audio-codec = <&wm8960>;
22 pinctrl-names = "default";
23 pinctrl-0 = <&aud_pins_default>;
24 };
diff --git a/Documentation/devicetree/bindings/sound/nau8824.txt b/Documentation/devicetree/bindings/sound/nau8824.txt
new file mode 100644
index 000000000000..e0058b97e49a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nau8824.txt
@@ -0,0 +1,88 @@
1Nuvoton NAU8824 audio codec
2
3This device supports I2C only.
4
5Required properties:
6 - compatible : Must be "nuvoton,nau8824"
7
8 - reg : the I2C address of the device. This is either 0x1a (CSB=0) or 0x1b (CSB=1).
9
10Optional properties:
11 - nuvoton,jkdet-polarity: JKDET pin polarity. 0 - active high, 1 - active low.
12
13 - nuvoton,vref-impedance: VREF Impedance selection
14 0 - Open
15 1 - 25 kOhm
16 2 - 125 kOhm
17 3 - 2.5 kOhm
18
19 - nuvoton,micbias-voltage: Micbias voltage level.
20 0 - VDDA
21 1 - VDDA
22 2 - VDDA * 1.1
23 3 - VDDA * 1.2
24 4 - VDDA * 1.3
25 5 - VDDA * 1.4
26 6 - VDDA * 1.53
27 7 - VDDA * 1.53
28
29 - nuvoton,sar-threshold-num: Number of buttons supported
30 - nuvoton,sar-threshold: Impedance threshold for each button. Array that contains up to 8 buttons configuration. SAR value is calculated as
31 SAR = 255 * MICBIAS / SAR_VOLTAGE * R / (2000 + R)
32 where MICBIAS is configured by 'nuvoton,micbias-voltage', SAR_VOLTAGE is configured by 'nuvoton,sar-voltage', R - button impedance.
33 Refer to datasheet section 10.2 for more information about threshold calculation.
34
35 - nuvoton,sar-hysteresis: Button impedance measurement hysteresis.
36
37 - nuvoton,sar-voltage: Reference voltage for button impedance measurement.
38 0 - VDDA
39 1 - VDDA
40 2 - VDDA * 1.1
41 3 - VDDA * 1.2
42 4 - VDDA * 1.3
43 5 - VDDA * 1.4
44 6 - VDDA * 1.53
45 7 - VDDA * 1.53
46
47 - nuvoton,sar-compare-time: SAR compare time
48 0 - 500 ns
49 1 - 1 us
50 2 - 2 us
51 3 - 4 us
52
53 - nuvoton,sar-sampling-time: SAR sampling time
54 0 - 2 us
55 1 - 4 us
56 2 - 8 us
57 3 - 16 us
58
59 - nuvoton,short-key-debounce: Button short key press debounce time.
60 0 - 30 ms
61 1 - 50 ms
62 2 - 100 ms
63
64 - nuvoton,jack-eject-debounce: Jack ejection debounce time.
65 0 - 0 ms
66 1 - 1 ms
67 2 - 10 ms
68
69
70Example:
71
72 headset: nau8824@1a {
73 compatible = "nuvoton,nau8824";
74 reg = <0x1a>;
75 interrupt-parent = <&gpio>;
76 interrupts = <TEGRA_GPIO(E, 6) IRQ_TYPE_LEVEL_LOW>;
77 nuvoton,vref-impedance = <2>;
78 nuvoton,micbias-voltage = <6>;
79 // Setup 4 buttons impedance according to Android specification
80 nuvoton,sar-threshold-num = <4>;
81 nuvoton,sar-threshold = <0xc 0x1e 0x38 0x60>;
82 nuvoton,sar-hysteresis = <0>;
83 nuvoton,sar-voltage = <6>;
84 nuvoton,sar-compare-time = <1>;
85 nuvoton,sar-sampling-time = <1>;
86 nuvoton,short-key-debounce = <0>;
87 nuvoton,jack-eject-debounce = <1>;
88 };
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
index a6600f6dea64..206aba1b34bb 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -9,6 +9,7 @@ Required properties:
9 - "rockchip,rk3066-i2s": for rk3066 9 - "rockchip,rk3066-i2s": for rk3066
10 - "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188 10 - "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188
11 - "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288 11 - "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288
12 - "rockchip,rk3368-i2s", "rockchip,rk3066-i2s": for rk3368
12 - "rockchip,rk3399-i2s", "rockchip,rk3066-i2s": for rk3399 13 - "rockchip,rk3399-i2s", "rockchip,rk3066-i2s": for rk3399
13- reg: physical base address of the controller and length of memory mapped 14- reg: physical base address of the controller and length of memory mapped
14 region. 15 region.
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
new file mode 100644
index 000000000000..c1ac70cb0afb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
@@ -0,0 +1,57 @@
1Samsung Exynos Odroid XU3/XU4 audio complex with MAX98090 codec
2
3Required properties:
4
5 - compatible - "samsung,odroidxu3-audio" - for Odroid XU3 board,
6 "samsung,odroidxu4-audio" - for Odroid XU4 board
7 - model - the user-visible name of this sound complex
8 - 'cpu' subnode with a 'sound-dai' property containing the phandle of the I2S
9 controller
10 - 'codec' subnode with a 'sound-dai' property containing list of phandles
11 to the CODEC nodes, first entry must be corresponding to the MAX98090
12 CODEC and the second entry must be the phandle of the HDMI IP block node
13 - clocks - should contain entries matching clock names in the clock-names
14 property
15 - clock-names - should contain following entries:
16 - "epll" - indicating the EPLL output clock
17 - "i2s_rclk" - indicating the RCLK (root) clock of the I2S0 controller
18 - samsung,audio-widgets - this property specifies off-codec audio elements
19 like headphones or speakers, for details see widgets.txt
20 - samsung,audio-routing - a list of the connections between audio
21 components; each entry is a pair of strings, the first being the
22 connection's sink, the second being the connection's source;
23 valid names for sources and sinks are the MAX98090's pins (as
24 documented in its binding), and the jacks on the board
25
26 For Odroid X2:
27 "Headphone Jack", "Mic Jack", "DMIC"
28
29 For Odroid U3, XU3:
30 "Headphone Jack", "Speakers"
31
32 For Odroid XU4:
33 no entries
34
35Example:
36
37sound {
38 compatible = "samsung,odroidxu3-audio";
39 samsung,cpu-dai = <&i2s0>;
40 samsung,codec-dai = <&max98090>;
41 model = "Odroid-XU3";
42 samsung,audio-routing =
43 "Headphone Jack", "HPL",
44 "Headphone Jack", "HPR",
45 "IN1", "Mic Jack",
46 "Mic Jack", "MICBIAS";
47
48 clocks = <&clock CLK_FOUT_EPLL>, <&i2s0 CLK_I2S_RCLK_SRC>;
49 clock-names = "epll", "i2s_rclk";
50
51 cpu {
52 sound-dai = <&i2s0 0>;
53 };
54 codec {
55 sound-dai = <&hdmi>, <&max98090>;
56 };
57};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 5666da7b8605..7a73a9d62015 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -26,6 +26,15 @@ Optional properties:
26 If this node is not mentioned or the value is unknown, then 26 If this node is not mentioned or the value is unknown, then
27 the value is set to 1.25V. 27 the value is set to 1.25V.
28 28
29- lrclk-strength: the LRCLK pad strength. Possible values are:
300, 1, 2 and 3 as per the table below:
31
32VDDIO 1.8V 2.5V 3.3V
330 = Disable
341 = 1.66 mA 2.87 mA 4.02 mA
352 = 3.33 mA 5.74 mA 8.03 mA
363 = 4.99 mA 8.61 mA 12.05 mA
37
29Example: 38Example:
30 39
31codec: sgtl5000@0a { 40codec: sgtl5000@0a {
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
new file mode 100644
index 000000000000..c59a3d779e06
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -0,0 +1,89 @@
1STMicroelectronics STM32 Serial Audio Interface (SAI).
2
3The SAI interface (Serial Audio Interface) offers a wide set of audio protocols
4such as I2S standards, LSB or MSB-justified, PCM/DSP, TDM, and AC'97.
5The SAI contains two independent audio sub-blocks. Each sub-block has
6its own clock generator and I/O lines controller.
7
8Required properties:
9 - compatible: Should be "st,stm32f4-sai"
10 - reg: Base address and size of SAI common register set.
11 - clocks: Must contain phandle and clock specifier pairs for each entry
12 in clock-names.
13 - clock-names: Must contain "x8k" and "x11k"
14 "x8k": SAI parent clock for sampling rates multiple of 8kHz.
15 "x11k": SAI parent clock for sampling rates multiple of 11.025kHz.
16 - interrupts: cpu DAI interrupt line shared by SAI sub-blocks
17
18Optional properties:
19 - resets: Reference to a reset controller asserting the SAI
20
21SAI subnodes:
22Two subnodes corresponding to SAI sub-block instances A and B can be defined.
23The subnode can be omitted for an unused sub-block.
24
25SAI subnodes required properties:
26 - compatible: Should be "st,stm32-sai-sub-a" or "st,stm32-sai-sub-b"
27 for SAI sub-block A or B respectively.
28 - reg: Base address and size of SAI sub-block register set.
29 - clocks: Must contain one phandle and clock specifier pair
30 for sai_ck which feeds the internal clock generator.
31 - clock-names: Must contain "sai_ck".
32 - dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt
33 - dma-names: identifier string for each DMA request line
34 "tx": if sai sub-block is configured as playback DAI
35 "rx": if sai sub-block is configured as capture DAI
36 - pinctrl-names: should contain only value "default"
37 - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
38
39Example:
40sound_card {
41 compatible = "audio-graph-card";
42 dais = <&sai1b_port>;
43};
44
45sai1: sai1@40015800 {
46 compatible = "st,stm32f4-sai";
47 #address-cells = <1>;
48 #size-cells = <1>;
49 ranges;
50 reg = <0x40015800 0x4>;
51 clocks = <&rcc 1 CLK_SAIQ_PDIV>, <&rcc 1 CLK_I2SQ_PDIV>;
52 clock-names = "x8k", "x11k";
53 interrupts = <87>;
54
55 sai1b: audio-controller@40015824 {
56 #sound-dai-cells = <0>;
57 compatible = "st,stm32-sai-sub-b";
58 reg = <0x40015824 0x1C>;
59 clocks = <&rcc 1 CLK_SAI2>;
60 clock-names = "sai_ck";
61 dmas = <&dma2 5 0 0x400 0x0>;
62 dma-names = "tx";
63 pinctrl-names = "default";
64 pinctrl-0 = <&pinctrl_sai1b>;
65
66 ports {
67 #address-cells = <1>;
68 #size-cells = <0>;
69
70 sai1b_port: port@0 {
71 reg = <0>;
72 cpu_endpoint: endpoint {
73 remote-endpoint = <&codec_endpoint>;
74 audio-graph-card,format = "i2s";
75 audio-graph-card,bitclock-master = <&codec_endpoint>;
76 audio-graph-card,frame-master = <&codec_endpoint>;
77 };
78 };
79 };
80 };
81};
82
83audio-codec {
84 codec_port: port {
85 codec_endpoint: endpoint {
86 remote-endpoint = <&cpu_endpoint>;
87 };
88 };
89};
diff --git a/Documentation/devicetree/bindings/sound/tas2552.txt b/Documentation/devicetree/bindings/sound/tas2552.txt
index c49992c0b62a..2d71eb05c1d3 100644
--- a/Documentation/devicetree/bindings/sound/tas2552.txt
+++ b/Documentation/devicetree/bindings/sound/tas2552.txt
@@ -5,7 +5,8 @@ The tas2552 serial control bus communicates through I2C protocols
5Required properties: 5Required properties:
6 - compatible - One of: 6 - compatible - One of:
7 "ti,tas2552" - TAS2552 7 "ti,tas2552" - TAS2552
8 - reg - I2C slave address 8 - reg - I2C slave address: it can be 0x40 if ADDR pin is 0
9 or 0x41 if ADDR pin is 1.
9 - supply-*: Required supply regulators are: 10 - supply-*: Required supply regulators are:
10 "vbat" battery voltage 11 "vbat" battery voltage
11 "iovdd" I/O Voltage 12 "iovdd" I/O Voltage
@@ -14,17 +15,20 @@ Required properties:
14Optional properties: 15Optional properties:
15 - enable-gpio - gpio pin to enable/disable the device 16 - enable-gpio - gpio pin to enable/disable the device
16 17
17tas2552 can receive it's reference clock via MCLK, BCLK, IVCLKIN pin or use the 18tas2552 can receive its reference clock via MCLK, BCLK, IVCLKIN pin or use the
18internal 1.8MHz. This CLKIN is used by the PLL. In addition to PLL, the PDM 19internal 1.8MHz. This CLKIN is used by the PLL. In addition to PLL, the PDM
19reference clock is also selectable: PLL, IVCLKIN, BCLK or MCLK. 20reference clock is also selectable: PLL, IVCLKIN, BCLK or MCLK.
20For system integration the dt-bindings/sound/tas2552.h header file provides 21For system integration the dt-bindings/sound/tas2552.h header file provides
21defined values to selct and configure the PLL and PDM reference clocks. 22defined values to select and configure the PLL and PDM reference clocks.
22 23
23Example: 24Example:
24 25
25tas2552: tas2552@41 { 26tas2552: tas2552@41 {
26 compatible = "ti,tas2552"; 27 compatible = "ti,tas2552";
27 reg = <0x41>; 28 reg = <0x41>;
29 vbat-supply = <&reg_vbat>;
30 iovdd-supply = <&reg_iovdd>;
31 avdd-supply = <&reg_avdd>;
28 enable-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>; 32 enable-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>;
29}; 33};
30 34
diff --git a/Documentation/devicetree/bindings/sound/wm8903.txt b/Documentation/devicetree/bindings/sound/wm8903.txt
index 94ec32c194bb..afc51caf1137 100644
--- a/Documentation/devicetree/bindings/sound/wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/wm8903.txt
@@ -28,6 +28,14 @@ Optional properties:
28 performed. If any entry has the value 0xffffffff, that GPIO's 28 performed. If any entry has the value 0xffffffff, that GPIO's
29 configuration will not be modified. 29 configuration will not be modified.
30 30
31 - AVDD-supply : Analog power supply regulator on the AVDD pin.
32
33 - CPVDD-supply : Charge pump supply regulator on the CPVDD pin.
34
35 - DBVDD-supply : Digital buffer supply regulator for the DBVDD pin.
36
37 - DCVDD-supply : Digital core supply regulator for the DCVDD pin.
38
31Pins on the device (for linking into audio routes): 39Pins on the device (for linking into audio routes):
32 40
33 * IN1L 41 * IN1L
@@ -54,6 +62,11 @@ codec: wm8903@1a {
54 reg = <0x1a>; 62 reg = <0x1a>;
55 interrupts = < 347 >; 63 interrupts = < 347 >;
56 64
65 AVDD-supply = <&fooreg_a>;
66 CPVDD-supply = <&fooreg_b>;
67 DBVDD-supply = <&fooreg_c>;
68 DCVDD-supply = <&fooreg_d>;
69
57 gpio-controller; 70 gpio-controller;
58 #gpio-cells = <2>; 71 #gpio-cells = <2>;
59 72
diff --git a/Documentation/devicetree/bindings/sound/zte,tdm.txt b/Documentation/devicetree/bindings/sound/zte,tdm.txt
new file mode 100644
index 000000000000..2a07ca655264
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/zte,tdm.txt
@@ -0,0 +1,30 @@
1ZTE TDM DAI driver
2
3Required properties:
4
5- compatible : should be one of the following.
6 * zte,zx296718-tdm
7- reg : physical base address of the controller and length of memory mapped
8 region.
9- clocks : Pairs of phandle and specifier referencing the controller's clocks.
10- clock-names: "wclk" for the wclk.
11 "pclk" for the pclk.
12- #clock-cells: should be 1.
13- zte,tdm-dma-sysctrl : Reference to the sysctrl controller controlling
14 the dma. includes:
15 phandle of sysctrl.
16 register offset in sysctrl for control dma.
17 mask of the register that be written to sysctrl.
18
19Example:
20
21 tdm: tdm@1487000 {
22 compatible = "zte,zx296718-tdm";
23 reg = <0x01487000 0x1000>;
24 clocks = <&audiocrm AUDIO_TDM_WCLK>, <&audiocrm AUDIO_TDM_PCLK>;
25 clock-names = "wclk", "pclk";
26 #clock-cells = <1>;
27 pinctrl-names = "default";
28 pinctrl-0 = <&tdm_global_pin>;
29 zte,tdm-dma-sysctrl = <&sysctrl 0x10c 4>;
30 };
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
index af0b366c25b7..8155dbc7fad3 100644
--- a/Documentation/extcon/intel-int3496.txt
+++ b/Documentation/extcon/intel-int3496.txt
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
20Index 2: The output gpio for muxing of the data pins between the USB host and 20Index 2: The output gpio for muxing of the data pins between the USB host and
21 the USB peripheral controller, write 1 to mux to the peripheral 21 the USB peripheral controller, write 1 to mux to the peripheral
22 controller 22 controller
23
24There is a mapping between indices and GPIO connection IDs as follows
25 id index 0
26 vbus index 1
27 mux index 2
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
58 int (*permission) (struct inode *, int, unsigned int); 58 int (*permission) (struct inode *, int, unsigned int);
59 int (*get_acl)(struct inode *, int); 59 int (*get_acl)(struct inode *, int);
60 int (*setattr) (struct dentry *, struct iattr *); 60 int (*setattr) (struct dentry *, struct iattr *);
61 int (*getattr) (const struct path *, struct dentry *, struct kstat *, 61 int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
62 u32, unsigned int);
63 ssize_t (*listxattr) (struct dentry *, char *, size_t); 62 ssize_t (*listxattr) (struct dentry *, char *, size_t);
64 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); 63 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
65 void (*update_time)(struct inode *, struct timespec *, int); 64 void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
600[recommended] 600[recommended]
601 ->readlink is optional for symlinks. Don't set, unless filesystem needs 601 ->readlink is optional for symlinks. Don't set, unless filesystem needs
602 to fake something for readlink(2). 602 to fake something for readlink(2).
603--
604[mandatory]
605 ->getattr() is now passed a struct path rather than a vfsmount and
606 dentry separately, and it now has request_mask and query_flags arguments
607 to specify the fields and sync type requested by statx. Filesystems not
608 supporting any statx-specific features may ignore the new arguments.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
382 int (*permission) (struct inode *, int); 382 int (*permission) (struct inode *, int);
383 int (*get_acl)(struct inode *, int); 383 int (*get_acl)(struct inode *, int);
384 int (*setattr) (struct dentry *, struct iattr *); 384 int (*setattr) (struct dentry *, struct iattr *);
385 int (*getattr) (const struct path *, struct dentry *, struct kstat *, 385 int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
386 u32, unsigned int);
387 ssize_t (*listxattr) (struct dentry *, char *, size_t); 386 ssize_t (*listxattr) (struct dentry *, char *, size_t);
388 void (*update_time)(struct inode *, struct timespec *, int); 387 void (*update_time)(struct inode *, struct timespec *, int);
389 int (*atomic_open)(struct inode *, struct dentry *, struct file *, 388 int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 891c69464434..433eaefb4aa1 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
18gcc-4.7 can be compiled by a C or a C++ compiler, 18gcc-4.7 can be compiled by a C or a C++ compiler,
19and versions 4.8+ can only be compiled by a C++ compiler. 19and versions 4.8+ can only be compiled by a C++ compiler.
20 20
21Currently the GCC plugin infrastructure supports only the x86, arm and arm64 21Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
22architectures. 22powerpc architectures.
23 23
24This infrastructure was ported from grsecurity [6] and PaX [7]. 24This infrastructure was ported from grsecurity [6] and PaX [7].
25 25
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
77 77
78int __init foo_probe(void) 78int __init foo_probe(void)
79{ 79{
80 int error;
81
80 struct pinctrl_dev *pctl; 82 struct pinctrl_dev *pctl;
81 83
82 return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl); 84 error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
85 if (error)
86 return error;
87
88 return pinctrl_enable(pctl);
83} 89}
84 90
85To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and 91To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
124 124
125.. code-block:: none 125.. code-block:: none
126 126
127 Cc: <stable@vger.kernel.org> # 3.3.x- 127 Cc: <stable@vger.kernel.org> # 3.3.x
128 128
129The tag has the meaning of: 129The tag has the meaning of:
130 130
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f772ae6..fd106899afd1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
3377 __u32 pad; 3377 __u32 pad;
3378}; 3378};
3379 3379
33804.104 KVM_X86_GET_MCE_CAP_SUPPORTED
3381
3382Capability: KVM_CAP_MCE
3383Architectures: x86
3384Type: system ioctl
3385Parameters: u64 mce_cap (out)
3386Returns: 0 on success, -1 on error
3387
3388Returns supported MCE capabilities. The u64 mce_cap parameter
3389has the same format as the MSR_IA32_MCG_CAP register. Supported
3390capabilities will have the corresponding bits set.
3391
33924.105 KVM_X86_SETUP_MCE
3393
3394Capability: KVM_CAP_MCE
3395Architectures: x86
3396Type: vcpu ioctl
3397Parameters: u64 mcg_cap (in)
3398Returns: 0 on success,
3399 -EFAULT if u64 mcg_cap cannot be read,
3400 -EINVAL if the requested number of banks is invalid,
3401 -EINVAL if requested MCE capability is not supported.
3402
3403Initializes MCE support for use. The u64 mcg_cap parameter
3404has the same format as the MSR_IA32_MCG_CAP register and
3405specifies which capabilities should be enabled. The maximum
3406supported number of error-reporting banks can be retrieved when
3407checking for KVM_CAP_MCE. The supported capabilities can be
3408retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
3409
34104.106 KVM_X86_SET_MCE
3411
3412Capability: KVM_CAP_MCE
3413Architectures: x86
3414Type: vcpu ioctl
3415Parameters: struct kvm_x86_mce (in)
3416Returns: 0 on success,
3417 -EFAULT if struct kvm_x86_mce cannot be read,
3418 -EINVAL if the bank number is invalid,
3419 -EINVAL if VAL bit is not set in status field.
3420
3421Inject a machine check error (MCE) into the guest. The input
3422parameter is:
3423
3424struct kvm_x86_mce {
3425 __u64 status;
3426 __u64 addr;
3427 __u64 misc;
3428 __u64 mcg_status;
3429 __u8 bank;
3430 __u8 pad1[7];
3431 __u64 pad2[3];
3432};
3433
3434If the MCE being reported is an uncorrected error, KVM will
3435inject it as an MCE exception into the guest. If the guest
3436MCG_STATUS register reports that an MCE is in progress, KVM
3437causes an KVM_EXIT_SHUTDOWN vmexit.
3438
3439Otherwise, if the MCE is a corrected error, KVM will just
3440store it in the corresponding bank (provided this bank is
3441not holding a previously reported uncorrected error).
3442
33805. The kvm_run structure 34435. The kvm_run structure
3381------------------------ 3444------------------------
3382 3445
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
83 83
84 Bits for undefined preemption levels are RAZ/WI. 84 Bits for undefined preemption levels are RAZ/WI.
85 85
86 For historical reasons and to provide ABI compatibility with userspace we
87 export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
88 field in the lower 5 bits of a word, meaning that userspace must always
89 use the lower 5 bits to communicate with the KVM device and must shift the
90 value left by 3 places to obtain the actual priority mask level.
91
86 Limitations: 92 Limitations:
87 - Priorities are not implemented, and registers are RAZ/WI 93 - Priorities are not implemented, and registers are RAZ/WI
88 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2. 94 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..38d3e4ed7208 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2585,12 +2585,26 @@ F: include/uapi/linux/if_bonding.h
2585 2585
2586BPF (Safe dynamic programs and tools) 2586BPF (Safe dynamic programs and tools)
2587M: Alexei Starovoitov <ast@kernel.org> 2587M: Alexei Starovoitov <ast@kernel.org>
2588M: Daniel Borkmann <daniel@iogearbox.net>
2588L: netdev@vger.kernel.org 2589L: netdev@vger.kernel.org
2589L: linux-kernel@vger.kernel.org 2590L: linux-kernel@vger.kernel.org
2590S: Supported 2591S: Supported
2592F: arch/x86/net/bpf_jit*
2593F: Documentation/networking/filter.txt
2594F: include/linux/bpf*
2595F: include/linux/filter.h
2596F: include/uapi/linux/bpf*
2597F: include/uapi/linux/filter.h
2591F: kernel/bpf/ 2598F: kernel/bpf/
2592F: tools/testing/selftests/bpf/ 2599F: kernel/trace/bpf_trace.c
2593F: lib/test_bpf.c 2600F: lib/test_bpf.c
2601F: net/bpf/
2602F: net/core/filter.c
2603F: net/sched/act_bpf.c
2604F: net/sched/cls_bpf.c
2605F: samples/bpf/
2606F: tools/net/bpf*
2607F: tools/testing/selftests/bpf/
2594 2608
2595BROADCOM B44 10/100 ETHERNET DRIVER 2609BROADCOM B44 10/100 ETHERNET DRIVER
2596M: Michael Chan <michael.chan@broadcom.com> 2610M: Michael Chan <michael.chan@broadcom.com>
@@ -3216,7 +3230,6 @@ F: drivers/platform/chrome/
3216 3230
3217CISCO VIC ETHERNET NIC DRIVER 3231CISCO VIC ETHERNET NIC DRIVER
3218M: Christian Benvenuti <benve@cisco.com> 3232M: Christian Benvenuti <benve@cisco.com>
3219M: Sujith Sankar <ssujith@cisco.com>
3220M: Govindarajulu Varadarajan <_govind@gmx.com> 3233M: Govindarajulu Varadarajan <_govind@gmx.com>
3221M: Neel Patel <neepatel@cisco.com> 3234M: Neel Patel <neepatel@cisco.com>
3222S: Supported 3235S: Supported
@@ -4118,14 +4131,13 @@ F: drivers/block/drbd/
4118F: lib/lru_cache.c 4131F: lib/lru_cache.c
4119F: Documentation/blockdev/drbd/ 4132F: Documentation/blockdev/drbd/
4120 4133
4121DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS 4134DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
4122M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 4135M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
4123T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git 4136T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
4124S: Supported 4137S: Supported
4125F: Documentation/kobject.txt 4138F: Documentation/kobject.txt
4126F: drivers/base/ 4139F: drivers/base/
4127F: fs/debugfs/ 4140F: fs/debugfs/
4128F: fs/kernfs/
4129F: fs/sysfs/ 4141F: fs/sysfs/
4130F: include/linux/debugfs.h 4142F: include/linux/debugfs.h
4131F: include/linux/kobj* 4143F: include/linux/kobj*
@@ -4776,6 +4788,12 @@ L: linux-edac@vger.kernel.org
4776S: Maintained 4788S: Maintained
4777F: drivers/edac/mpc85xx_edac.[ch] 4789F: drivers/edac/mpc85xx_edac.[ch]
4778 4790
4791EDAC-PND2
4792M: Tony Luck <tony.luck@intel.com>
4793L: linux-edac@vger.kernel.org
4794S: Maintained
4795F: drivers/edac/pnd2_edac.[ch]
4796
4779EDAC-PASEMI 4797EDAC-PASEMI
4780M: Egor Martovetsky <egor@pasemi.com> 4798M: Egor Martovetsky <egor@pasemi.com>
4781L: linux-edac@vger.kernel.org 4799L: linux-edac@vger.kernel.org
@@ -4923,6 +4941,7 @@ F: include/linux/netfilter_bridge/
4923F: net/bridge/ 4941F: net/bridge/
4924 4942
4925ETHERNET PHY LIBRARY 4943ETHERNET PHY LIBRARY
4944M: Andrew Lunn <andrew@lunn.ch>
4926M: Florian Fainelli <f.fainelli@gmail.com> 4945M: Florian Fainelli <f.fainelli@gmail.com>
4927L: netdev@vger.kernel.org 4946L: netdev@vger.kernel.org
4928S: Maintained 4947S: Maintained
@@ -7084,9 +7103,9 @@ S: Maintained
7084F: fs/autofs4/ 7103F: fs/autofs4/
7085 7104
7086KERNEL BUILD + files below scripts/ (unless maintained elsewhere) 7105KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
7106M: Masahiro Yamada <yamada.masahiro@socionext.com>
7087M: Michal Marek <mmarek@suse.com> 7107M: Michal Marek <mmarek@suse.com>
7088T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next 7108T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
7089T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
7090L: linux-kbuild@vger.kernel.org 7109L: linux-kbuild@vger.kernel.org
7091S: Maintained 7110S: Maintained
7092F: Documentation/kbuild/ 7111F: Documentation/kbuild/
@@ -7203,6 +7222,14 @@ F: arch/mips/include/uapi/asm/kvm*
7203F: arch/mips/include/asm/kvm* 7222F: arch/mips/include/asm/kvm*
7204F: arch/mips/kvm/ 7223F: arch/mips/kvm/
7205 7224
7225KERNFS
7226M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7227M: Tejun Heo <tj@kernel.org>
7228T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
7229S: Supported
7230F: include/linux/kernfs.h
7231F: fs/kernfs/
7232
7206KEXEC 7233KEXEC
7207M: Eric Biederman <ebiederm@xmission.com> 7234M: Eric Biederman <ebiederm@xmission.com>
7208W: http://kernel.org/pub/linux/utils/kernel/kexec/ 7235W: http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -7774,13 +7801,6 @@ F: include/net/mac80211.h
7774F: net/mac80211/ 7801F: net/mac80211/
7775F: drivers/net/wireless/mac80211_hwsim.[ch] 7802F: drivers/net/wireless/mac80211_hwsim.[ch]
7776 7803
7777MACVLAN DRIVER
7778M: Patrick McHardy <kaber@trash.net>
7779L: netdev@vger.kernel.org
7780S: Maintained
7781F: drivers/net/macvlan.c
7782F: include/linux/if_macvlan.h
7783
7784MAILBOX API 7804MAILBOX API
7785M: Jassi Brar <jassisinghbrar@gmail.com> 7805M: Jassi Brar <jassisinghbrar@gmail.com>
7786L: linux-kernel@vger.kernel.org 7806L: linux-kernel@vger.kernel.org
@@ -7853,6 +7873,8 @@ F: drivers/net/ethernet/marvell/mvneta.*
7853MARVELL MWIFIEX WIRELESS DRIVER 7873MARVELL MWIFIEX WIRELESS DRIVER
7854M: Amitkumar Karwar <akarwar@marvell.com> 7874M: Amitkumar Karwar <akarwar@marvell.com>
7855M: Nishant Sarmukadam <nishants@marvell.com> 7875M: Nishant Sarmukadam <nishants@marvell.com>
7876M: Ganapathi Bhat <gbhat@marvell.com>
7877M: Xinming Hu <huxm@marvell.com>
7856L: linux-wireless@vger.kernel.org 7878L: linux-wireless@vger.kernel.org
7857S: Maintained 7879S: Maintained
7858F: drivers/net/wireless/marvell/mwifiex/ 7880F: drivers/net/wireless/marvell/mwifiex/
@@ -8753,6 +8775,7 @@ W: http://www.linuxfoundation.org/en/Net
8753Q: http://patchwork.ozlabs.org/project/netdev/list/ 8775Q: http://patchwork.ozlabs.org/project/netdev/list/
8754T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 8776T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
8755T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 8777T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
8778B: mailto:netdev@vger.kernel.org
8756S: Maintained 8779S: Maintained
8757F: net/ 8780F: net/
8758F: include/net/ 8781F: include/net/
@@ -10814,6 +10837,7 @@ F: drivers/s390/block/dasd*
10814F: block/partitions/ibm.c 10837F: block/partitions/ibm.c
10815 10838
10816S390 NETWORK DRIVERS 10839S390 NETWORK DRIVERS
10840M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10817M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10841M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10818L: linux-s390@vger.kernel.org 10842L: linux-s390@vger.kernel.org
10819W: http://www.ibm.com/developerworks/linux/linux390/ 10843W: http://www.ibm.com/developerworks/linux/linux390/
@@ -10844,6 +10868,7 @@ S: Supported
10844F: drivers/s390/scsi/zfcp_* 10868F: drivers/s390/scsi/zfcp_*
10845 10869
10846S390 IUCV NETWORK LAYER 10870S390 IUCV NETWORK LAYER
10871M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10847M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10872M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10848L: linux-s390@vger.kernel.org 10873L: linux-s390@vger.kernel.org
10849W: http://www.ibm.com/developerworks/linux/linux390/ 10874W: http://www.ibm.com/developerworks/linux/linux390/
@@ -12454,7 +12479,6 @@ F: drivers/clk/ti/
12454F: include/linux/clk/ti.h 12479F: include/linux/clk/ti.h
12455 12480
12456TI ETHERNET SWITCH DRIVER (CPSW) 12481TI ETHERNET SWITCH DRIVER (CPSW)
12457M: Mugunthan V N <mugunthanvnm@ti.com>
12458R: Grygorii Strashko <grygorii.strashko@ti.com> 12482R: Grygorii Strashko <grygorii.strashko@ti.com>
12459L: linux-omap@vger.kernel.org 12483L: linux-omap@vger.kernel.org
12460L: netdev@vger.kernel.org 12484L: netdev@vger.kernel.org
@@ -13295,7 +13319,7 @@ F: drivers/virtio/
13295F: tools/virtio/ 13319F: tools/virtio/
13296F: drivers/net/virtio_net.c 13320F: drivers/net/virtio_net.c
13297F: drivers/block/virtio_blk.c 13321F: drivers/block/virtio_blk.c
13298F: include/linux/virtio_*.h 13322F: include/linux/virtio*.h
13299F: include/uapi/linux/virtio_*.h 13323F: include/uapi/linux/virtio_*.h
13300F: drivers/crypto/virtio/ 13324F: drivers/crypto/virtio/
13301 13325
@@ -13383,14 +13407,6 @@ W: https://linuxtv.org
13383S: Maintained 13407S: Maintained
13384F: drivers/media/platform/vivid/* 13408F: drivers/media/platform/vivid/*
13385 13409
13386VLAN (802.1Q)
13387M: Patrick McHardy <kaber@trash.net>
13388L: netdev@vger.kernel.org
13389S: Maintained
13390F: drivers/net/macvlan.c
13391F: include/linux/if_*vlan.h
13392F: net/8021q/
13393
13394VLYNQ BUS 13410VLYNQ BUS
13395M: Florian Fainelli <f.fainelli@gmail.com> 13411M: Florian Fainelli <f.fainelli@gmail.com>
13396L: openwrt-devel@lists.openwrt.org (subscribers-only) 13412L: openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
index b2faa9319372..779302695453 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc8
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
372CFLAGS_KERNEL = 372CFLAGS_KERNEL =
373AFLAGS_KERNEL = 373AFLAGS_KERNEL =
374LDFLAGS_vmlinux = 374LDFLAGS_vmlinux =
375CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized 375CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) 376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
377 377
378 378
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
653# Tell gcc to never replace conditional load with a non-conditional one 653# Tell gcc to never replace conditional load with a non-conditional one
654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) 654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
655 655
656# check for 'asm goto'
657ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
658 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
659 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
660endif
661
656include scripts/Makefile.gcc-plugins 662include scripts/Makefile.gcc-plugins
657 663
658ifdef CONFIG_READABLE_ASM 664ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
798# use the deterministic mode of AR if available 804# use the deterministic mode of AR if available
799KBUILD_ARFLAGS := $(call ar-option,D) 805KBUILD_ARFLAGS := $(call ar-option,D)
800 806
801# check for 'asm goto'
802ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
803 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
804 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
805endif
806
807include scripts/Makefile.kasan 807include scripts/Makefile.kasan
808include scripts/Makefile.extrawarn 808include scripts/Makefile.extrawarn
809include scripts/Makefile.ubsan 809include scripts/Makefile.ubsan
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
1290 /* copy relevant bits of struct timex. */ 1290 /* copy relevant bits of struct timex. */
1291 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || 1291 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
1292 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - 1292 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
1293 offsetof(struct timex32, time))) 1293 offsetof(struct timex32, tick)))
1294 return -EFAULT; 1294 return -EFAULT;
1295 1295
1296 ret = do_adjtimex(&txc); 1296 ret = do_adjtimex(&txc);
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 65808fe0a290..2891cb266cf0 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -26,6 +26,7 @@
26 device_type = "cpu"; 26 device_type = "cpu";
27 compatible = "snps,arc770d"; 27 compatible = "snps,arc770d";
28 reg = <0>; 28 reg = <0>;
29 clocks = <&core_clk>;
29 }; 30 };
30 }; 31 };
31 32
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 2dfe8037dfbb..5e944d3e5b74 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -21,6 +21,7 @@
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
24 }; 25 };
25 }; 26 };
26 27
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index 4c11079f3565..54b277d7dea0 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -19,8 +19,27 @@
19 19
20 cpu@0 { 20 cpu@0 {
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38xN"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
25 };
26 cpu@1 {
27 device_type = "cpu";
28 compatible = "snps,archs38";
29 reg = <1>;
30 clocks = <&core_clk>;
31 };
32 cpu@2 {
33 device_type = "cpu";
34 compatible = "snps,archs38";
35 reg = <2>;
36 clocks = <&core_clk>;
37 };
38 cpu@3 {
39 device_type = "cpu";
40 compatible = "snps,archs38";
41 reg = <3>;
42 clocks = <&core_clk>;
24 }; 43 };
25 }; 44 };
26 45
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index f0df59b23e21..459fc656b759 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -112,13 +112,19 @@
112 interrupts = <7>; 112 interrupts = <7>;
113 bus-width = <4>; 113 bus-width = <4>;
114 }; 114 };
115 };
115 116
116 /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */ 117 /*
117 uio_ev: uio@0xD0000000 { 118 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
118 compatible = "generic-uio"; 119 *
119 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 120 * This node is intentionally put outside of MB above becase
120 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 121 * it maps areas outside of MB's 0xEz-0xFz.
121 interrupts = <23>; 122 */
122 }; 123 uio_ev: uio@0xD0000000 {
124 compatible = "generic-uio";
125 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
126 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
127 interrupt-parent = <&mb_intc>;
128 interrupts = <23>;
123 }; 129 };
124}; 130};
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 00bdbe167615..2e52d18e6bc7 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
54void kretprobe_trampoline(void); 54void kretprobe_trampoline(void);
55void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 55void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
56#else 56#else
57static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 57#define trap_is_kprobe(address, regs)
58{
59}
60#endif /* CONFIG_KPROBES */ 58#endif /* CONFIG_KPROBES */
61 59
62#endif /* _ARC_KPROBES_H */ 60#endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2585632eaa68..cc558a25b8fa 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -100,15 +100,21 @@ END(handle_interrupt)
100;################### Non TLB Exception Handling ############################# 100;################### Non TLB Exception Handling #############################
101 101
102ENTRY(EV_SWI) 102ENTRY(EV_SWI)
103 flag 1 103 ; TODO: implement this
104 EXCEPTION_PROLOGUE
105 b ret_from_exception
104END(EV_SWI) 106END(EV_SWI)
105 107
106ENTRY(EV_DivZero) 108ENTRY(EV_DivZero)
107 flag 1 109 ; TODO: implement this
110 EXCEPTION_PROLOGUE
111 b ret_from_exception
108END(EV_DivZero) 112END(EV_DivZero)
109 113
110ENTRY(EV_DCError) 114ENTRY(EV_DCError)
111 flag 1 115 ; TODO: implement this
116 EXCEPTION_PROLOGUE
117 b ret_from_exception
112END(EV_DCError) 118END(EV_DCError)
113 119
114; --------------------------------------------- 120; ---------------------------------------------
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3093fa898a23..fa62404ba58f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -10,6 +10,7 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/root_dev.h> 12#include <linux/root_dev.h>
13#include <linux/clk.h>
13#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
14#include <linux/clocksource.h> 15#include <linux/clocksource.h>
15#include <linux/console.h> 16#include <linux/console.h>
@@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
488{ 489{
489 char *str; 490 char *str;
490 int cpu_id = ptr_to_cpu(v); 491 int cpu_id = ptr_to_cpu(v);
491 struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk"); 492 struct device *cpu_dev = get_cpu_device(cpu_id);
492 u32 freq = 0; 493 struct clk *cpu_clk;
494 unsigned long freq = 0;
493 495
494 if (!cpu_online(cpu_id)) { 496 if (!cpu_online(cpu_id)) {
495 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id); 497 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
502 504
503 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 505 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
504 506
505 of_property_read_u32(core_clk, "clock-frequency", &freq); 507 cpu_clk = clk_get(cpu_dev, NULL);
508 if (IS_ERR(cpu_clk)) {
509 seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
510 cpu_id);
511 } else {
512 freq = clk_get_rate(cpu_clk);
513 }
506 if (freq) 514 if (freq)
507 seq_printf(m, "CPU speed\t: %u.%02u Mhz\n", 515 seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
508 freq / 1000000, (freq / 10000) % 100); 516 freq / 1000000, (freq / 10000) % 100);
509 517
510 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n", 518 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d408fa21a07c..928562967f3c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
633 633
634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); 634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
635 635
636 /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
637 read_aux_reg(r);
638
636 /* Important to wait for flush to complete */ 639 /* Important to wait for flush to complete */
637 while (read_aux_reg(r) & SLC_CTRL_BUSY); 640 while (read_aux_reg(r) & SLC_CTRL_BUSY);
638} 641}
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index efb5eae290a8..d42b98f15e8b 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -371,6 +371,8 @@
371 371
372 phy1: ethernet-phy@1 { 372 phy1: ethernet-phy@1 {
373 reg = <7>; 373 reg = <7>;
374 eee-broken-100tx;
375 eee-broken-1000t;
374 }; 376 };
375}; 377};
376 378
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 9e43c443738a..9ba4b18c0cb2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -672,6 +672,7 @@
672 ti,non-removable; 672 ti,non-removable;
673 bus-width = <4>; 673 bus-width = <4>;
674 cap-power-off-card; 674 cap-power-off-card;
675 keep-power-in-suspend;
675 pinctrl-names = "default"; 676 pinctrl-names = "default";
676 pinctrl-0 = <&mmc2_pins>; 677 pinctrl-0 = <&mmc2_pins>;
677 678
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981eae96b9..1ec8e0d80191 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
63 label = "home"; 63 label = "home";
64 linux,code = <KEY_HOME>; 64 linux,code = <KEY_HOME>;
65 gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>; 65 gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
66 gpio-key,wakeup; 66 wakeup-source;
67 }; 67 };
68 68
69 button@1 { 69 button@1 {
70 label = "menu"; 70 label = "menu";
71 linux,code = <KEY_MENU>; 71 linux,code = <KEY_MENU>;
72 gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>; 72 gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
73 gpio-key,wakeup; 73 wakeup-source;
74 }; 74 };
75 75
76 }; 76 };
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c545b01..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
315 /* ID & VBUS GPIOs provided in board dts */ 315 /* ID & VBUS GPIOs provided in board dts */
316 }; 316 };
317 }; 317 };
318
319 tpic2810: tpic2810@60 {
320 compatible = "ti,tpic2810";
321 reg = <0x60>;
322 gpio-controller;
323 #gpio-cells = <2>;
324 };
318}; 325};
319 326
320&mcspi3 { 327&mcspi3 {
@@ -330,13 +337,6 @@
330 spi-max-frequency = <1000000>; 337 spi-max-frequency = <1000000>;
331 spi-cpol; 338 spi-cpol;
332 }; 339 };
333
334 tpic2810: tpic2810@60 {
335 compatible = "ti,tpic2810";
336 reg = <0x60>;
337 gpio-controller;
338 #gpio-cells = <2>;
339 };
340}; 340};
341 341
342&uart3 { 342&uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089cf5ad..00de62dc0042 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
66 timer@20200 { 66 timer@20200 {
67 compatible = "arm,cortex-a9-global-timer"; 67 compatible = "arm,cortex-a9-global-timer";
68 reg = <0x20200 0x100>; 68 reg = <0x20200 0x100>;
69 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 69 interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
70 clocks = <&periph_clk>; 70 clocks = <&periph_clk>;
71 }; 71 };
72 72
73 local-timer@20600 { 73 local-timer@20600 {
74 compatible = "arm,cortex-a9-twd-timer"; 74 compatible = "arm,cortex-a9-twd-timer";
75 reg = <0x20600 0x100>; 75 reg = <0x20600 0x100>;
76 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; 76 interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
77 clocks = <&periph_clk>; 77 clocks = <&periph_clk>;
78 }; 78 };
79 79
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd923096a8c..ae31a5826e91 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
48 }; 48 };
49 49
50 memory { 50 memory {
51 reg = <0x00000000 0x10000000>; 51 reg = <0x80000000 0x10000000>;
52 }; 52 };
53}; 53};
54 54
55&uart0 { 55&uart0 {
56 clock-frequency = <62499840>; 56 status = "okay";
57}; 57};
58 58
59&uart1 { 59&uart1 {
60 clock-frequency = <62499840>;
61 status = "okay"; 60 status = "okay";
62}; 61};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40eb90c..df05e7f568af 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd542200d3d..4a3ab19c6281 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd350fcd..81f78435d8c7 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 31 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c4860db52..c88b8fefcb2f 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d5ba44..d503fa0dde31 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb5854a224..cc0363b843c1 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324fda8..74e15a3cd9f8 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2c9e56f4aac5..bbfb9d5a70a9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -283,6 +283,7 @@
283 device_type = "pci"; 283 device_type = "pci";
284 ranges = <0x81000000 0 0 0x03000 0 0x00010000 284 ranges = <0x81000000 0 0 0x03000 0 0x00010000
285 0x82000000 0 0x20013000 0x13000 0 0xffed000>; 285 0x82000000 0 0x20013000 0x13000 0 0xffed000>;
286 bus-range = <0x00 0xff>;
286 #interrupt-cells = <1>; 287 #interrupt-cells = <1>;
287 num-lanes = <1>; 288 num-lanes = <1>;
288 linux,pci-domain = <0>; 289 linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
319 device_type = "pci"; 320 device_type = "pci";
320 ranges = <0x81000000 0 0 0x03000 0 0x00010000 321 ranges = <0x81000000 0 0 0x03000 0 0x00010000
321 0x82000000 0 0x30013000 0x13000 0 0xffed000>; 322 0x82000000 0 0x30013000 0x13000 0 0xffed000>;
323 bus-range = <0x00 0xff>;
322 #interrupt-cells = <1>; 324 #interrupt-cells = <1>;
323 num-lanes = <1>; 325 num-lanes = <1>;
324 linux,pci-domain = <1>; 326 linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466fe0b1d..dcfc97591433 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
121 }; 121 };
122}; 122};
123 123
124&cpu0 {
125 arm-supply = <&sw1a_reg>;
126 soc-supply = <&sw1c_reg>;
127};
128
129&fec1 { 124&fec1 {
130 pinctrl-names = "default"; 125 pinctrl-names = "default";
131 pinctrl-0 = <&pinctrl_enet1>; 126 pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69ca818c..efe53998c961 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
121&i2c3 { 121&i2c3 {
122 clock-frequency = <400000>; 122 clock-frequency = <400000>;
123 at24@50 { 123 at24@50 {
124 compatible = "at24,24c02"; 124 compatible = "atmel,24c64";
125 readonly; 125 readonly;
126 reg = <0x50>; 126 reg = <0x50>;
127 }; 127 };
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be72140..528b4e9c6d3d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
266 }; 266 };
267 267
268 usb1: ohci@00400000 { 268 usb1: ohci@00400000 {
269 compatible = "atmel,sama5d2-ohci", "usb-ohci"; 269 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
270 reg = <0x00400000 0x100000>; 270 reg = <0x00400000 0x100000>;
271 interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>; 271 interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
272 clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; 272 clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c4771293..162e1eb5373d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
14#include <dt-bindings/mfd/dbx500-prcmu.h> 14#include <dt-bindings/mfd/dbx500-prcmu.h>
15#include <dt-bindings/arm/ux500_pm_domains.h> 15#include <dt-bindings/arm/ux500_pm_domains.h>
16#include <dt-bindings/gpio/gpio.h> 16#include <dt-bindings/gpio/gpio.h>
17#include <dt-bindings/clock/ste-ab8500.h>
17#include "skeleton.dtsi" 18#include "skeleton.dtsi"
18 19
19/ { 20/ {
@@ -603,6 +604,11 @@
603 interrupt-controller; 604 interrupt-controller;
604 #interrupt-cells = <2>; 605 #interrupt-cells = <2>;
605 606
607 ab8500_clock: clock-controller {
608 compatible = "stericsson,ab8500-clk";
609 #clock-cells = <1>;
610 };
611
606 ab8500_gpio: ab8500-gpio { 612 ab8500_gpio: ab8500-gpio {
607 compatible = "stericsson,ab8500-gpio"; 613 compatible = "stericsson,ab8500-gpio";
608 gpio-controller; 614 gpio-controller;
@@ -686,6 +692,8 @@
686 692
687 ab8500-pwm { 693 ab8500-pwm {
688 compatible = "stericsson,ab8500-pwm"; 694 compatible = "stericsson,ab8500-pwm";
695 clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
696 clock-names = "intclk";
689 }; 697 };
690 698
691 ab8500-debugfs { 699 ab8500-debugfs {
@@ -700,6 +708,9 @@
700 V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>; 708 V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
701 V-DMIC-supply = <&ab8500_ldo_dmic_reg>; 709 V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
702 710
711 clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
712 clock-names = "audioclk";
713
703 stericsson,earpeice-cmv = <950>; /* Units in mV. */ 714 stericsson,earpeice-cmv = <950>; /* Units in mV. */
704 }; 715 };
705 716
@@ -1095,6 +1106,14 @@
1095 status = "disabled"; 1106 status = "disabled";
1096 }; 1107 };
1097 1108
1109 sound {
1110 compatible = "stericsson,snd-soc-mop500";
1111 stericsson,cpu-dai = <&msp1 &msp3>;
1112 stericsson,audio-codec = <&codec>;
1113 clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
1114 clock-names = "sysclk", "ulpclk", "intclk";
1115 };
1116
1098 msp0: msp@80123000 { 1117 msp0: msp@80123000 {
1099 compatible = "stericsson,ux500-msp-i2s"; 1118 compatible = "stericsson,ux500-msp-i2s";
1100 reg = <0x80123000 0x1000>; 1119 reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e10713c..9e359e4f342e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
186 status = "okay"; 186 status = "okay";
187 }; 187 };
188 188
189 sound {
190 compatible = "stericsson,snd-soc-mop500";
191
192 stericsson,cpu-dai = <&msp1 &msp3>;
193 stericsson,audio-codec = <&codec>;
194 clocks = <&prcmu_clk PRCMU_SYSCLK>;
195 clock-names = "sysclk";
196 };
197
198 msp0: msp@80123000 { 189 msp0: msp@80123000 {
199 pinctrl-names = "default"; 190 pinctrl-names = "default";
200 pinctrl-0 = <&msp0_default_mode>; 191 pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514def604..ade1d0d4e5f4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
159 "", "", "", "", "", "", "", ""; 159 "", "", "", "", "", "", "", "";
160 }; 160 };
161 161
162 sound {
163 compatible = "stericsson,snd-soc-mop500";
164
165 stericsson,cpu-dai = <&msp1 &msp3>;
166 stericsson,audio-codec = <&codec>;
167 clocks = <&prcmu_clk PRCMU_SYSCLK>;
168 clock-names = "sysclk";
169 };
170
171 msp0: msp@80123000 { 162 msp0: msp@80123000 {
172 pinctrl-names = "default"; 163 pinctrl-names = "default";
173 pinctrl-0 = <&msp0_default_mode>; 164 pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5ae052..bbf1c8cbaac6 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
167 reg = <8>; 167 reg = <8>;
168 label = "cpu"; 168 label = "cpu";
169 ethernet = <&gmac>; 169 ethernet = <&gmac>;
170 phy-mode = "rgmii"; 170 phy-mode = "rgmii-txid";
171 fixed-link { 171 fixed-link {
172 speed = <1000>; 172 speed = <1000>;
173 full-duplex; 173 full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0703cc..8a3ed21cb7bc 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
495 resets = <&ccu RST_BUS_GPU>; 495 resets = <&ccu RST_BUS_GPU>;
496 496
497 assigned-clocks = <&ccu CLK_GPU>; 497 assigned-clocks = <&ccu CLK_GPU>;
498 assigned-clock-rates = <408000000>; 498 assigned-clock-rates = <384000000>;
499 }; 499 };
500 500
501 gic: interrupt-controller@01c81000 { 501 gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 0467fb365bfc..306af6cadf26 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -66,12 +66,6 @@
66 opp-microvolt = <1200000>; 66 opp-microvolt = <1200000>;
67 clock-latency-ns = <244144>; /* 8 32k periods */ 67 clock-latency-ns = <244144>; /* 8 32k periods */
68 }; 68 };
69
70 opp@1200000000 {
71 opp-hz = /bits/ 64 <1200000000>;
72 opp-microvolt = <1320000>;
73 clock-latency-ns = <244144>; /* 8 32k periods */
74 };
75 }; 69 };
76 70
77 cpus { 71 cpus {
@@ -81,16 +75,22 @@
81 operating-points-v2 = <&cpu0_opp_table>; 75 operating-points-v2 = <&cpu0_opp_table>;
82 }; 76 };
83 77
78 cpu@1 {
79 operating-points-v2 = <&cpu0_opp_table>;
80 };
81
84 cpu@2 { 82 cpu@2 {
85 compatible = "arm,cortex-a7"; 83 compatible = "arm,cortex-a7";
86 device_type = "cpu"; 84 device_type = "cpu";
87 reg = <2>; 85 reg = <2>;
86 operating-points-v2 = <&cpu0_opp_table>;
88 }; 87 };
89 88
90 cpu@3 { 89 cpu@3 {
91 compatible = "arm,cortex-a7"; 90 compatible = "arm,cortex-a7";
92 device_type = "cpu"; 91 device_type = "cpu";
93 reg = <3>; 92 reg = <3>;
93 operating-points-v2 = <&cpu0_opp_table>;
94 }; 94 };
95 }; 95 };
96 96
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18ff487..d6bd15898db6 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
50 50
51 backlight: backlight { 51 backlight: backlight {
52 compatible = "pwm-backlight"; 52 compatible = "pwm-backlight";
53 pinctrl-names = "default";
54 pinctrl-0 = <&bl_en_pin>;
55 pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>; 53 pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
56 brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>; 54 brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
57 default-brightness-level = <8>; 55 default-brightness-level = <8>;
@@ -93,11 +91,6 @@
93}; 91};
94 92
95&pio { 93&pio {
96 bl_en_pin: bl_en_pin@0 {
97 pins = "PH6";
98 function = "gpio_in";
99 };
100
101 mmc0_cd_pin: mmc0_cd_pin@0 { 94 mmc0_cd_pin: mmc0_cd_pin@0 {
102 pins = "PB4"; 95 pins = "PB4";
103 function = "gpio_in"; 96 function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6bdba6..decd388d613d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
188CONFIG_WL18XX=m 188CONFIG_WL18XX=m
189CONFIG_WLCORE_SPI=m 189CONFIG_WLCORE_SPI=m
190CONFIG_WLCORE_SDIO=m 190CONFIG_WLCORE_SDIO=m
191CONFIG_INPUT_MOUSEDEV=m
191CONFIG_INPUT_JOYDEV=m 192CONFIG_INPUT_JOYDEV=m
192CONFIG_INPUT_EVDEV=m 193CONFIG_INPUT_EVDEV=m
193CONFIG_KEYBOARD_ATKBD=m 194CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
1124 if (__hyp_get_vectors() == hyp_default_vectors) 1124 if (__hyp_get_vectors() == hyp_default_vectors)
1125 cpu_init_hyp_mode(NULL); 1125 cpu_init_hyp_mode(NULL);
1126 } 1126 }
1127
1128 if (vgic_present)
1129 kvm_vgic_init_cpu_hardware();
1127} 1130}
1128 1131
1129static void cpu_hyp_reset(void) 1132static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
292 phys_addr_t addr = start, end = start + size; 292 phys_addr_t addr = start, end = start + size;
293 phys_addr_t next; 293 phys_addr_t next;
294 294
295 assert_spin_locked(&kvm->mmu_lock);
295 pgd = kvm->arch.pgd + stage2_pgd_index(addr); 296 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
296 do { 297 do {
297 next = stage2_pgd_addr_end(addr, end); 298 next = stage2_pgd_addr_end(addr, end);
298 if (!stage2_pgd_none(*pgd)) 299 if (!stage2_pgd_none(*pgd))
299 unmap_stage2_puds(kvm, pgd, addr, next); 300 unmap_stage2_puds(kvm, pgd, addr, next);
301 /*
302 * If the range is too large, release the kvm->mmu_lock
303 * to prevent starvation and lockup detector warnings.
304 */
305 if (next != end)
306 cond_resched_lock(&kvm->mmu_lock);
300 } while (pgd++, addr = next, addr != end); 307 } while (pgd++, addr = next, addr != end);
301} 308}
302 309
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
803 int idx; 810 int idx;
804 811
805 idx = srcu_read_lock(&kvm->srcu); 812 idx = srcu_read_lock(&kvm->srcu);
813 down_read(&current->mm->mmap_sem);
806 spin_lock(&kvm->mmu_lock); 814 spin_lock(&kvm->mmu_lock);
807 815
808 slots = kvm_memslots(kvm); 816 slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
810 stage2_unmap_memslot(kvm, memslot); 818 stage2_unmap_memslot(kvm, memslot);
811 819
812 spin_unlock(&kvm->mmu_lock); 820 spin_unlock(&kvm->mmu_lock);
821 up_read(&current->mm->mmap_sem);
813 srcu_read_unlock(&kvm->srcu, idx); 822 srcu_read_unlock(&kvm->srcu, idx);
814} 823}
815 824
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
829 if (kvm->arch.pgd == NULL) 838 if (kvm->arch.pgd == NULL)
830 return; 839 return;
831 840
841 spin_lock(&kvm->mmu_lock);
832 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 842 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
843 spin_unlock(&kvm->mmu_lock);
844
833 /* Free the HW pgd, one page at a time */ 845 /* Free the HW pgd, one page at a time */
834 free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); 846 free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
835 kvm->arch.pgd = NULL; 847 kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1801 (KVM_PHYS_SIZE >> PAGE_SHIFT)) 1813 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1802 return -EFAULT; 1814 return -EFAULT;
1803 1815
1816 down_read(&current->mm->mmap_sem);
1804 /* 1817 /*
1805 * A memory region could potentially cover multiple VMAs, and any holes 1818 * A memory region could potentially cover multiple VMAs, and any holes
1806 * between them, so iterate over all of them to find out if we can map 1819 * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1844 pa += vm_start - vma->vm_start; 1857 pa += vm_start - vma->vm_start;
1845 1858
1846 /* IO region dirty page logging not allowed */ 1859 /* IO region dirty page logging not allowed */
1847 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) 1860 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1848 return -EINVAL; 1861 ret = -EINVAL;
1862 goto out;
1863 }
1849 1864
1850 ret = kvm_phys_addr_ioremap(kvm, gpa, pa, 1865 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1851 vm_end - vm_start, 1866 vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1857 } while (hva < reg_end); 1872 } while (hva < reg_end);
1858 1873
1859 if (change == KVM_MR_FLAGS_ONLY) 1874 if (change == KVM_MR_FLAGS_ONLY)
1860 return ret; 1875 goto out;
1861 1876
1862 spin_lock(&kvm->mmu_lock); 1877 spin_lock(&kvm->mmu_lock);
1863 if (ret) 1878 if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1865 else 1880 else
1866 stage2_flush_memslot(kvm, memslot); 1881 stage2_flush_memslot(kvm, memslot);
1867 spin_unlock(&kvm->mmu_lock); 1882 spin_unlock(&kvm->mmu_lock);
1883out:
1884 up_read(&current->mm->mmap_sem);
1868 return ret; 1885 return ret;
1869} 1886}
1870 1887
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b7905bd9..a277981f414d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
289 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); 289 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
290} 290}
291 291
292static void sama5d3_ddr_standby(void)
293{
294 u32 lpr0;
295 u32 saved_lpr0;
296
297 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
298 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
299 lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
300
301 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
302
303 cpu_do_idle();
304
305 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
306}
307
292/* We manage both DDRAM/SDRAM controllers, we need more than one value to 308/* We manage both DDRAM/SDRAM controllers, we need more than one value to
293 * remember. 309 * remember.
294 */ 310 */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
323 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, 339 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
324 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, 340 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
325 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, 341 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
326 { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby }, 342 { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
327 { /*sentinel*/ } 343 { /*sentinel*/ }
328}; 344};
329 345
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b62c8d..c89757abb0ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
241 241
242onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o 242onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
243obj-y += $(onenand-m) $(onenand-y) 243obj-y += $(onenand-m) $(onenand-y)
244
245nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
246obj-y += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index c4f2ace91ea2..3089d3bfa19b 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
270extern int omap4_mpuss_init(void); 270extern int omap4_mpuss_init(void);
271extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); 271extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
272extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); 272extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
273extern u32 omap4_get_cpu1_ns_pa_addr(void);
273#else 274#else
274static inline int omap4_enter_lowpower(unsigned int cpu, 275static inline int omap4_enter_lowpower(unsigned int cpu,
275 unsigned int power_state) 276 unsigned int power_state)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027f3c3b..000000000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * gpmc-nand.c
3 *
4 * Copyright (C) 2009 Texas Instruments
5 * Vimal Singh <vimalsingh@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/io.h>
15#include <linux/omap-gpmc.h>
16#include <linux/mtd/nand.h>
17#include <linux/platform_data/mtd-nand-omap2.h>
18
19#include <asm/mach/flash.h>
20
21#include "soc.h"
22
23/* minimum size for IO mapping */
24#define NAND_IO_SIZE 4
25
26static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
27{
28 /* platforms which support all ECC schemes */
29 if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
30 soc_is_omap54xx() || soc_is_dra7xx())
31 return 1;
32
33 if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
34 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
35 if (cpu_is_omap24xx())
36 return 0;
37 else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
38 return 0;
39 else
40 return 1;
41 }
42
43 /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
44 * which require H/W based ECC error detection */
45 if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
46 ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
47 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
48 return 0;
49
50 /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
51 if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
52 ecc_opt == OMAP_ECC_HAM1_CODE_SW)
53 return 1;
54 else
55 return 0;
56}
57
58/* This function will go away once the device-tree convertion is complete */
59static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
60 struct gpmc_settings *s)
61{
62 /* Enable RD PIN Monitoring Reg */
63 if (gpmc_nand_data->dev_ready) {
64 s->wait_on_read = true;
65 s->wait_on_write = true;
66 }
67
68 if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
69 s->device_width = GPMC_DEVWIDTH_16BIT;
70 else
71 s->device_width = GPMC_DEVWIDTH_8BIT;
72}
73
74int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
75 struct gpmc_timings *gpmc_t)
76{
77 int err = 0;
78 struct gpmc_settings s;
79 struct platform_device *pdev;
80 struct resource gpmc_nand_res[] = {
81 { .flags = IORESOURCE_MEM, },
82 { .flags = IORESOURCE_IRQ, },
83 { .flags = IORESOURCE_IRQ, },
84 };
85
86 BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
87
88 err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
89 (unsigned long *)&gpmc_nand_res[0].start);
90 if (err < 0) {
91 pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
92 gpmc_nand_data->cs, err);
93 return err;
94 }
95 gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
96 gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
97 gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
98
99 memset(&s, 0, sizeof(struct gpmc_settings));
100 gpmc_set_legacy(gpmc_nand_data, &s);
101
102 s.device_nand = true;
103
104 if (gpmc_t) {
105 err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
106 if (err < 0) {
107 pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
108 err);
109 return err;
110 }
111 }
112
113 err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
114 if (err < 0)
115 goto out_free_cs;
116
117 err = gpmc_configure(GPMC_CONFIG_WP, 0);
118 if (err < 0)
119 goto out_free_cs;
120
121 if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
122 pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
123 err = -EINVAL;
124 goto out_free_cs;
125 }
126
127
128 pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
129 if (pdev) {
130 err = platform_device_add_resources(pdev, gpmc_nand_res,
131 ARRAY_SIZE(gpmc_nand_res));
132 if (!err)
133 pdev->dev.platform_data = gpmc_nand_data;
134 } else {
135 err = -ENOMEM;
136 }
137 if (err)
138 goto out_free_pdev;
139
140 err = platform_device_add(pdev);
141 if (err) {
142 dev_err(&pdev->dev, "Unable to register NAND device\n");
143 goto out_free_pdev;
144 }
145
146 return 0;
147
148out_free_pdev:
149 platform_device_put(pdev);
150out_free_cs:
151 gpmc_cs_free(gpmc_nand_data->cs);
152
153 return err;
154}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c703546a..2944af820558 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
367 return ret; 367 return ret;
368} 368}
369 369
370void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) 370int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
371{ 371{
372 int err; 372 int err;
373 struct device *dev = &gpmc_onenand_device.dev; 373 struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
393 if (err < 0) { 393 if (err < 0) {
394 dev_err(dev, "Cannot request GPMC CS %d, error %d\n", 394 dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
395 gpmc_onenand_data->cs, err); 395 gpmc_onenand_data->cs, err);
396 return; 396 return err;
397 } 397 }
398 398
399 gpmc_onenand_resource.end = gpmc_onenand_resource.start + 399 gpmc_onenand_resource.end = gpmc_onenand_resource.start +
400 ONENAND_IO_SIZE - 1; 400 ONENAND_IO_SIZE - 1;
401 401
402 if (platform_device_register(&gpmc_onenand_device) < 0) { 402 err = platform_device_register(&gpmc_onenand_device);
403 if (err) {
403 dev_err(dev, "Unable to register OneNAND device\n"); 404 dev_err(dev, "Unable to register OneNAND device\n");
404 gpmc_cs_free(gpmc_onenand_data->cs); 405 gpmc_cs_free(gpmc_onenand_data->cs);
405 return;
406 } 406 }
407
408 return err;
407} 409}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <asm/assembler.h>
20 21
21#include "omap44xx.h" 22#include "omap44xx.h"
22 23
@@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
66 cmp r0, r4 67 cmp r0, r4
67 bne wait_2 68 bne wait_2
68 ldr r12, =API_HYP_ENTRY 69 ldr r12, =API_HYP_ENTRY
69 adr r0, hyp_boot 70 badr r0, hyp_boot
70 smc #0 71 smc #0
71hyp_boot: 72hyp_boot:
72 b omap_secondary_startup 73 b omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index d3fb5661bb5d..433db6d0b073 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
50 omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); 50 omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
51 51
52 if (omap_secure_apis_support()) 52 if (omap_secure_apis_support())
53 boot_cpu = omap_read_auxcoreboot0(); 53 boot_cpu = omap_read_auxcoreboot0() >> 9;
54 else 54 else
55 boot_cpu = 55 boot_cpu =
56 readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5; 56 readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 113ab2dd2ee9..03ec6d307c82 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
64#include "prm-regbits-44xx.h" 64#include "prm-regbits-44xx.h"
65 65
66static void __iomem *sar_base; 66static void __iomem *sar_base;
67static u32 old_cpu1_ns_pa_addr;
67 68
68#if defined(CONFIG_PM) && defined(CONFIG_SMP) 69#if defined(CONFIG_PM) && defined(CONFIG_SMP)
69 70
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
212{} 213{}
213#endif 214#endif
214 215
216u32 omap4_get_cpu1_ns_pa_addr(void)
217{
218 return old_cpu1_ns_pa_addr;
219}
220
215/** 221/**
216 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function 222 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
217 * The purpose of this function is to manage low power programming 223 * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
460void __init omap4_mpuss_early_init(void) 466void __init omap4_mpuss_early_init(void)
461{ 467{
462 unsigned long startup_pa; 468 unsigned long startup_pa;
469 void __iomem *ns_pa_addr;
463 470
464 if (!(cpu_is_omap44xx() || soc_is_omap54xx())) 471 if (!(soc_is_omap44xx() || soc_is_omap54xx()))
465 return; 472 return;
466 473
467 sar_base = omap4_get_sar_ram_base(); 474 sar_base = omap4_get_sar_ram_base();
468 475
469 if (cpu_is_omap443x()) 476 /* Save old NS_PA_ADDR for validity checks later on */
477 if (soc_is_omap44xx())
478 ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
479 else
480 ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
481 old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
482
483 if (soc_is_omap443x())
470 startup_pa = __pa_symbol(omap4_secondary_startup); 484 startup_pa = __pa_symbol(omap4_secondary_startup);
471 else if (cpu_is_omap446x()) 485 else if (soc_is_omap446x())
472 startup_pa = __pa_symbol(omap4460_secondary_startup); 486 startup_pa = __pa_symbol(omap4460_secondary_startup);
473 else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 487 else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
474 startup_pa = __pa_symbol(omap5_secondary_hyp_startup); 488 startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
475 else 489 else
476 startup_pa = __pa_symbol(omap5_secondary_startup); 490 startup_pa = __pa_symbol(omap5_secondary_startup);
477 491
478 if (cpu_is_omap44xx()) 492 if (soc_is_omap44xx())
479 writel_relaxed(startup_pa, sar_base + 493 writel_relaxed(startup_pa, sar_base +
480 CPU1_WAKEUP_NS_PA_ADDR_OFFSET); 494 CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
481 else 495 else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd90125bffc7..72506e6cf9e7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
94 ldr r12, =0x103 94 ldr r12, =0x103
95 dsb 95 dsb
96 smc #0 96 smc #0
97 mov r0, r0, lsr #9
98 ldmfd sp!, {r2-r12, pc} 97 ldmfd sp!, {r2-r12, pc}
99ENDPROC(omap_read_auxcoreboot0) 98ENDPROC(omap_read_auxcoreboot0)
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 003353b0b794..3faf454ba487 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/irqchip/arm-gic.h> 22#include <linux/irqchip/arm-gic.h>
23 23
24#include <asm/sections.h>
24#include <asm/smp_scu.h> 25#include <asm/smp_scu.h>
25#include <asm/virt.h> 26#include <asm/virt.h>
26 27
@@ -40,10 +41,14 @@
40 41
41#define OMAP5_CORE_COUNT 0x2 42#define OMAP5_CORE_COUNT 0x2
42 43
44#define AUX_CORE_BOOT0_GP_RELEASE 0x020
45#define AUX_CORE_BOOT0_HS_RELEASE 0x200
46
43struct omap_smp_config { 47struct omap_smp_config {
44 unsigned long cpu1_rstctrl_pa; 48 unsigned long cpu1_rstctrl_pa;
45 void __iomem *cpu1_rstctrl_va; 49 void __iomem *cpu1_rstctrl_va;
46 void __iomem *scu_base; 50 void __iomem *scu_base;
51 void __iomem *wakeupgen_base;
47 void *startup_addr; 52 void *startup_addr;
48}; 53};
49 54
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
140 static struct clockdomain *cpu1_clkdm; 145 static struct clockdomain *cpu1_clkdm;
141 static bool booted; 146 static bool booted;
142 static struct powerdomain *cpu1_pwrdm; 147 static struct powerdomain *cpu1_pwrdm;
143 void __iomem *base = omap_get_wakeupgen_base();
144 148
145 /* 149 /*
146 * Set synchronisation state between this boot processor 150 * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
155 * A barrier is added to ensure that write buffer is drained 159 * A barrier is added to ensure that write buffer is drained
156 */ 160 */
157 if (omap_secure_apis_support()) 161 if (omap_secure_apis_support())
158 omap_modify_auxcoreboot0(0x200, 0xfffffdff); 162 omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
163 0xfffffdff);
159 else 164 else
160 writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0); 165 writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
166 cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
161 167
162 if (!cpu1_clkdm && !cpu1_pwrdm) { 168 if (!cpu1_clkdm && !cpu1_pwrdm) {
163 cpu1_clkdm = clkdm_lookup("mpu1_clkdm"); 169 cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
261 set_cpu_possible(i, true); 267 set_cpu_possible(i, true);
262} 268}
263 269
270/*
271 * For now, just make sure the start-up address is not within the booting
272 * kernel space as that means we just overwrote whatever secondary_startup()
273 * code there was.
274 */
275static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
276{
277 if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
278 return false;
279
280 return true;
281}
282
283/*
284 * We may need to reset CPU1 before configuring, otherwise kexec boot can end
285 * up trying to use old kernel startup address or suspend-resume will
286 * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
287 * idle states.
288 */
289static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
290{
291 unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
292 bool needs_reset = false;
293 u32 released;
294
295 if (omap_secure_apis_support())
296 released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
297 else
298 released = readl_relaxed(cfg.wakeupgen_base +
299 OMAP_AUX_CORE_BOOT_0) &
300 AUX_CORE_BOOT0_GP_RELEASE;
301 if (released) {
302 pr_warn("smp: CPU1 not parked?\n");
303
304 return;
305 }
306
307 cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
308 OMAP_AUX_CORE_BOOT_1);
309 cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
310
311 /* Did the configured secondary_startup() get overwritten? */
312 if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
313 needs_reset = true;
314
315 /*
316 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
317 * deeper idle state in WFI and will wake to an invalid address.
318 */
319 if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
320 !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
321 needs_reset = true;
322
323 if (!needs_reset || !c->cpu1_rstctrl_va)
324 return;
325
326 pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
327 cpu1_startup_pa, cpu1_ns_pa_addr);
328
329 writel_relaxed(1, c->cpu1_rstctrl_va);
330 readl_relaxed(c->cpu1_rstctrl_va);
331 writel_relaxed(0, c->cpu1_rstctrl_va);
332}
333
264static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) 334static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
265{ 335{
266 void __iomem *base = omap_get_wakeupgen_base();
267 const struct omap_smp_config *c = NULL; 336 const struct omap_smp_config *c = NULL;
268 337
269 if (soc_is_omap443x()) 338 if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
281 /* Must preserve cfg.scu_base set earlier */ 350 /* Must preserve cfg.scu_base set earlier */
282 cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa; 351 cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
283 cfg.startup_addr = c->startup_addr; 352 cfg.startup_addr = c->startup_addr;
353 cfg.wakeupgen_base = omap_get_wakeupgen_base();
284 354
285 if (soc_is_dra74x() || soc_is_omap54xx()) { 355 if (soc_is_dra74x() || soc_is_omap54xx()) {
286 if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 356 if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
299 if (cfg.scu_base) 369 if (cfg.scu_base)
300 scu_enable(cfg.scu_base); 370 scu_enable(cfg.scu_base);
301 371
302 /* 372 omap4_smp_maybe_reset_cpu1(&cfg);
303 * Reset CPU1 before configuring, otherwise kexec will
304 * end up trying to use old kernel startup address.
305 */
306 if (cfg.cpu1_rstctrl_va) {
307 writel_relaxed(1, cfg.cpu1_rstctrl_va);
308 readl_relaxed(cfg.cpu1_rstctrl_va);
309 writel_relaxed(0, cfg.cpu1_rstctrl_va);
310 }
311 373
312 /* 374 /*
313 * Write the address of secondary startup routine into the 375 * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
319 omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr)); 381 omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
320 else 382 else
321 writel_relaxed(__pa_symbol(cfg.startup_addr), 383 writel_relaxed(__pa_symbol(cfg.startup_addr),
322 base + OMAP_AUX_CORE_BOOT_1); 384 cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
323} 385}
324 386
325const struct smp_operations omap4_smp_ops __initconst = { 387const struct smp_operations omap4_smp_ops __initconst = {
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd83e443..f989145480c8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
222 dev_err(dev, "failed to idle\n"); 222 dev_err(dev, "failed to idle\n");
223 } 223 }
224 break; 224 break;
225 case BUS_NOTIFY_BIND_DRIVER:
226 od = to_omap_device(pdev);
227 if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
228 pm_runtime_status_suspended(dev)) {
229 od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
230 pm_runtime_set_active(dev);
231 }
232 break;
225 case BUS_NOTIFY_ADD_DEVICE: 233 case BUS_NOTIFY_ADD_DEVICE:
226 if (pdev->dev.of_node) 234 if (pdev->dev.of_node)
227 omap_device_build_from_dt(pdev); 235 omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..1435fee39a89 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
2112}; 2112};
2113 2113
2114/* L4 CORE -> SR1 interface */ 2114/* L4 CORE -> SR1 interface */
2115static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
2116 {
2117 .pa_start = OMAP34XX_SR1_BASE,
2118 .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
2119 .flags = ADDR_TYPE_RT,
2120 },
2121 { },
2122};
2115 2123
2116static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = { 2124static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
2117 .master = &omap3xxx_l4_core_hwmod, 2125 .master = &omap3xxx_l4_core_hwmod,
2118 .slave = &omap34xx_sr1_hwmod, 2126 .slave = &omap34xx_sr1_hwmod,
2119 .clk = "sr_l4_ick", 2127 .clk = "sr_l4_ick",
2128 .addr = omap3_sr1_addr_space,
2120 .user = OCP_USER_MPU, 2129 .user = OCP_USER_MPU,
2121}; 2130};
2122 2131
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
2124 .master = &omap3xxx_l4_core_hwmod, 2133 .master = &omap3xxx_l4_core_hwmod,
2125 .slave = &omap36xx_sr1_hwmod, 2134 .slave = &omap36xx_sr1_hwmod,
2126 .clk = "sr_l4_ick", 2135 .clk = "sr_l4_ick",
2136 .addr = omap3_sr1_addr_space,
2127 .user = OCP_USER_MPU, 2137 .user = OCP_USER_MPU,
2128}; 2138};
2129 2139
2130/* L4 CORE -> SR1 interface */ 2140/* L4 CORE -> SR1 interface */
2141static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
2142 {
2143 .pa_start = OMAP34XX_SR2_BASE,
2144 .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
2145 .flags = ADDR_TYPE_RT,
2146 },
2147 { },
2148};
2131 2149
2132static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = { 2150static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
2133 .master = &omap3xxx_l4_core_hwmod, 2151 .master = &omap3xxx_l4_core_hwmod,
2134 .slave = &omap34xx_sr2_hwmod, 2152 .slave = &omap34xx_sr2_hwmod,
2135 .clk = "sr_l4_ick", 2153 .clk = "sr_l4_ick",
2154 .addr = omap3_sr2_addr_space,
2136 .user = OCP_USER_MPU, 2155 .user = OCP_USER_MPU,
2137}; 2156};
2138 2157
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
2140 .master = &omap3xxx_l4_core_hwmod, 2159 .master = &omap3xxx_l4_core_hwmod,
2141 .slave = &omap36xx_sr2_hwmod, 2160 .slave = &omap36xx_sr2_hwmod,
2142 .clk = "sr_l4_ick", 2161 .clk = "sr_l4_ick",
2162 .addr = omap3_sr2_addr_space,
2143 .user = OCP_USER_MPU, 2163 .user = OCP_USER_MPU,
2144}; 2164};
2145 2165
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
3111 * Return: 0 if device named @dev_name is not likely to be accessible, 3131 * Return: 0 if device named @dev_name is not likely to be accessible,
3112 * or 1 if it is likely to be accessible. 3132 * or 1 if it is likely to be accessible.
3113 */ 3133 */
3114static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, 3134static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
3115 const char *dev_name) 3135 const char *dev_name)
3116{ 3136{
3137 struct device_node *node;
3138 bool available;
3139
3117 if (!bus) 3140 if (!bus)
3118 return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0; 3141 return omap_type() == OMAP2_DEVICE_TYPE_GP;
3119 3142
3120 if (of_device_is_available(of_find_node_by_name(bus, dev_name))) 3143 node = of_get_child_by_name(bus, dev_name);
3121 return 1; 3144 available = of_device_is_available(node);
3145 of_node_put(node);
3122 3146
3123 return 0; 3147 return available;
3124} 3148}
3125 3149
3126int __init omap3xxx_hwmod_init(void) 3150int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
3189 3213
3190 if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) { 3214 if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
3191 r = omap_hwmod_register_links(h_sham); 3215 r = omap_hwmod_register_links(h_sham);
3192 if (r < 0) 3216 if (r < 0) {
3217 of_node_put(bus);
3193 return r; 3218 return r;
3219 }
3194 } 3220 }
3195 3221
3196 if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) { 3222 if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
3197 r = omap_hwmod_register_links(h_aes); 3223 r = omap_hwmod_register_links(h_aes);
3198 if (r < 0) 3224 if (r < 0) {
3225 of_node_put(bus);
3199 return r; 3226 return r;
3227 }
3200 } 3228 }
3229 of_node_put(bus);
3201 3230
3202 /* 3231 /*
3203 * Register hwmod links specific to certain ES levels of a 3232 * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 633442ad4e4c..2a7bb6ccdcb7 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
6 select GPIOLIB 6 select GPIOLIB
7 select MVEBU_MBUS 7 select MVEBU_MBUS
8 select PCI 8 select PCI
9 select PHYLIB if NETDEVICES
9 select PLAT_ORION_LEGACY 10 select PLAT_ORION_LEGACY
10 help 11 help
11 Support for the following Marvell Orion 5x series SoCs: 12 Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
935 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); 935 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
936} 936}
937 937
938/*
939 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
940 * that the intention is to allow exporting memory allocated via the
941 * coherent DMA APIs through the dma_buf API, which only accepts a
942 * scattertable. This presents a couple of problems:
943 * 1. Not all memory allocated via the coherent DMA APIs is backed by
944 * a struct page
945 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
946 * as we will try to flush the memory through a different alias to that
947 * actually being used (and the flushes are redundant.)
948 */
938int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 949int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
939 void *cpu_addr, dma_addr_t handle, size_t size, 950 void *cpu_addr, dma_addr_t handle, size_t size,
940 unsigned long attrs) 951 unsigned long attrs)
941{ 952{
942 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 953 unsigned long pfn = dma_to_pfn(dev, handle);
954 struct page *page;
943 int ret; 955 int ret;
944 956
957 /* If the PFN is not valid, we do not have a struct page */
958 if (!pfn_valid(pfn))
959 return -ENXIO;
960
961 page = pfn_to_page(pfn);
962
945 ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 963 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
946 if (unlikely(ret)) 964 if (unlikely(ret))
947 return ret; 965 return ret;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
303 */ 303 */
304static inline bool security_extensions_enabled(void) 304static inline bool security_extensions_enabled(void)
305{ 305{
306 return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); 306 /* Check CPUID Identification Scheme before ID_PFR1 read */
307 if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
308 return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
309 return 0;
307} 310}
308 311
309static unsigned long __init setup_vectors_base(void) 312static unsigned long __init setup_vectors_base(void)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9255b6d67ba5..aff6994950ba 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
468 eth_data, &orion_ge11); 468 eth_data, &orion_ge11);
469} 469}
470 470
471#ifdef CONFIG_ARCH_ORION5X
471/***************************************************************************** 472/*****************************************************************************
472 * Ethernet switch 473 * Ethernet switch
473 ****************************************************************************/ 474 ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
480 struct mdio_board_info *bd; 481 struct mdio_board_info *bd;
481 unsigned int i; 482 unsigned int i;
482 483
484 if (!IS_BUILTIN(CONFIG_PHYLIB))
485 return;
486
483 for (i = 0; i < ARRAY_SIZE(d->port_names); i++) 487 for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
484 if (!strcmp(d->port_names[i], "cpu")) 488 if (!strcmp(d->port_names[i], "cpu"))
485 break; 489 break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
493 497
494 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); 498 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
495} 499}
500#endif
496 501
497/***************************************************************************** 502/*****************************************************************************
498 * I2C 503 * I2C
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
266#endif 266#endif
267 267
268 if (p) { 268 if (p) {
269 if (cur) { 269 if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
270 /*
271 * Probe hit but conditional execution check failed,
272 * so just skip the instruction and continue as if
273 * nothing had happened.
274 * In this case, we can skip recursing check too.
275 */
276 singlestep_skip(p, regs);
277 } else if (cur) {
270 /* Kprobe is pending, so we're recursing. */ 278 /* Kprobe is pending, so we're recursing. */
271 switch (kcb->kprobe_status) { 279 switch (kcb->kprobe_status) {
272 case KPROBE_HIT_ACTIVE: 280 case KPROBE_HIT_ACTIVE:
273 case KPROBE_HIT_SSDONE: 281 case KPROBE_HIT_SSDONE:
282 case KPROBE_HIT_SS:
274 /* A pre- or post-handler probe got us here. */ 283 /* A pre- or post-handler probe got us here. */
275 kprobes_inc_nmissed_count(p); 284 kprobes_inc_nmissed_count(p);
276 save_previous_kprobe(kcb); 285 save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
279 singlestep(p, regs, kcb); 288 singlestep(p, regs, kcb);
280 restore_previous_kprobe(kcb); 289 restore_previous_kprobe(kcb);
281 break; 290 break;
291 case KPROBE_REENTER:
292 /* A nested probe was hit in FIQ, it is a BUG */
293 pr_warn("Unrecoverable kprobe detected at %p.\n",
294 p->addr);
295 /* fall through */
282 default: 296 default:
283 /* impossible cases */ 297 /* impossible cases */
284 BUG(); 298 BUG();
285 } 299 }
286 } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { 300 } else {
287 /* Probe hit and conditional execution check ok. */ 301 /* Probe hit and conditional execution check ok. */
288 set_current_kprobe(p); 302 set_current_kprobe(p);
289 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 303 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
304 } 318 }
305 reset_current_kprobe(); 319 reset_current_kprobe();
306 } 320 }
307 } else {
308 /*
309 * Probe hit but conditional execution check failed,
310 * so just skip the instruction and continue as if
311 * nothing had happened.
312 */
313 singlestep_skip(p, regs);
314 } 321 }
315 } else if (cur) { 322 } else if (cur) {
316 /* We probably hit a jprobe. Call its break handler. */ 323 /* We probably hit a jprobe. Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
434 struct hlist_node *tmp; 441 struct hlist_node *tmp;
435 unsigned long flags, orig_ret_address = 0; 442 unsigned long flags, orig_ret_address = 0;
436 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 443 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
444 kprobe_opcode_t *correct_ret_addr = NULL;
437 445
438 INIT_HLIST_HEAD(&empty_rp); 446 INIT_HLIST_HEAD(&empty_rp);
439 kretprobe_hash_lock(current, &head, &flags); 447 kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
456 /* another task is sharing our hash bucket */ 464 /* another task is sharing our hash bucket */
457 continue; 465 continue;
458 466
467 orig_ret_address = (unsigned long)ri->ret_addr;
468
469 if (orig_ret_address != trampoline_address)
470 /*
471 * This is the real return address. Any other
472 * instances associated with this task are for
473 * other calls deeper on the call stack
474 */
475 break;
476 }
477
478 kretprobe_assert(ri, orig_ret_address, trampoline_address);
479
480 correct_ret_addr = ri->ret_addr;
481 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
482 if (ri->task != current)
483 /* another task is sharing our hash bucket */
484 continue;
485
486 orig_ret_address = (unsigned long)ri->ret_addr;
459 if (ri->rp && ri->rp->handler) { 487 if (ri->rp && ri->rp->handler) {
460 __this_cpu_write(current_kprobe, &ri->rp->kp); 488 __this_cpu_write(current_kprobe, &ri->rp->kp);
461 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; 489 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
490 ri->ret_addr = correct_ret_addr;
462 ri->rp->handler(ri, regs); 491 ri->rp->handler(ri, regs);
463 __this_cpu_write(current_kprobe, NULL); 492 __this_cpu_write(current_kprobe, NULL);
464 } 493 }
465 494
466 orig_ret_address = (unsigned long)ri->ret_addr;
467 recycle_rp_inst(ri, &empty_rp); 495 recycle_rp_inst(ri, &empty_rp);
468 496
469 if (orig_ret_address != trampoline_address) 497 if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
475 break; 503 break;
476 } 504 }
477 505
478 kretprobe_assert(ri, orig_ret_address, trampoline_address);
479 kretprobe_hash_unlock(current, &flags); 506 kretprobe_hash_unlock(current, &flags);
480 507
481 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { 508 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
977void __naked __kprobes_test_case_start(void) 977void __naked __kprobes_test_case_start(void)
978{ 978{
979 __asm__ __volatile__ ( 979 __asm__ __volatile__ (
980 "stmdb sp!, {r4-r11} \n\t" 980 "mov r2, sp \n\t"
981 "bic r3, r2, #7 \n\t"
982 "mov sp, r3 \n\t"
983 "stmdb sp!, {r2-r11} \n\t"
981 "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 984 "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
982 "bic r0, lr, #1 @ r0 = inline data \n\t" 985 "bic r0, lr, #1 @ r0 = inline data \n\t"
983 "mov r1, sp \n\t" 986 "mov r1, sp \n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
997 "movne pc, r0 \n\t" 1000 "movne pc, r0 \n\t"
998 "mov r0, r4 \n\t" 1001 "mov r0, r4 \n\t"
999 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 1002 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
1000 "ldmia sp!, {r4-r11} \n\t" 1003 "ldmia sp!, {r2-r11} \n\t"
1004 "mov sp, r2 \n\t"
1001 "mov pc, r0 \n\t" 1005 "mov pc, r0 \n\t"
1002 ); 1006 );
1003} 1007}
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
1013 "bxne r0 \n\t" 1017 "bxne r0 \n\t"
1014 "mov r0, r4 \n\t" 1018 "mov r0, r4 \n\t"
1015 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 1019 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
1016 "ldmia sp!, {r4-r11} \n\t" 1020 "ldmia sp!, {r2-r11} \n\t"
1021 "mov sp, r2 \n\t"
1017 "bx r0 \n\t" 1022 "bx r0 \n\t"
1018 ); 1023 );
1019} 1024}
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 1c64ea2d23f9..0565779e66fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -179,8 +179,10 @@
179 usbphy: phy@01c19400 { 179 usbphy: phy@01c19400 {
180 compatible = "allwinner,sun50i-a64-usb-phy"; 180 compatible = "allwinner,sun50i-a64-usb-phy";
181 reg = <0x01c19400 0x14>, 181 reg = <0x01c19400 0x14>,
182 <0x01c1a800 0x4>,
182 <0x01c1b800 0x4>; 183 <0x01c1b800 0x4>;
183 reg-names = "phy_ctrl", 184 reg-names = "phy_ctrl",
185 "pmu0",
184 "pmu1"; 186 "pmu1";
185 clocks = <&ccu CLK_USB_PHY0>, 187 clocks = <&ccu CLK_USB_PHY0>,
186 <&ccu CLK_USB_PHY1>; 188 <&ccu CLK_USB_PHY1>;
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203c09c5..bcb03fc32665 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
114 pcie0: pcie@20020000 { 114 pcie0: pcie@20020000 {
115 compatible = "brcm,iproc-pcie"; 115 compatible = "brcm,iproc-pcie";
116 reg = <0 0x20020000 0 0x1000>; 116 reg = <0 0x20020000 0 0x1000>;
117 dma-coherent;
117 118
118 #interrupt-cells = <1>; 119 #interrupt-cells = <1>;
119 interrupt-map-mask = <0 0 0 0>; 120 interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
144 pcie4: pcie@50020000 { 145 pcie4: pcie@50020000 {
145 compatible = "brcm,iproc-pcie"; 146 compatible = "brcm,iproc-pcie";
146 reg = <0 0x50020000 0 0x1000>; 147 reg = <0 0x50020000 0 0x1000>;
148 dma-coherent;
147 149
148 #interrupt-cells = <1>; 150 #interrupt-cells = <1>;
149 interrupt-map-mask = <0 0 0 0>; 151 interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
174 pcie8: pcie@60c00000 { 176 pcie8: pcie@60c00000 {
175 compatible = "brcm,iproc-pcie-paxc"; 177 compatible = "brcm,iproc-pcie-paxc";
176 reg = <0 0x60c00000 0 0x1000>; 178 reg = <0 0x60c00000 0 0x1000>;
179 dma-coherent;
177 linux,pci-domain = <8>; 180 linux,pci-domain = <8>;
178 181
179 bus-range = <0x0 0x1>; 182 bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
203 <0x61030000 0x100>; 206 <0x61030000 0x100>;
204 reg-names = "amac_base", "idm_base", "nicpm_base"; 207 reg-names = "amac_base", "idm_base", "nicpm_base";
205 interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>; 208 interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
209 dma-coherent;
206 phy-handle = <&gphy0>; 210 phy-handle = <&gphy0>;
207 phy-mode = "rgmii"; 211 phy-mode = "rgmii";
208 status = "disabled"; 212 status = "disabled";
@@ -213,6 +217,7 @@
213 reg = <0x612c0000 0x445>; /* PDC FS0 regs */ 217 reg = <0x612c0000 0x445>; /* PDC FS0 regs */
214 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; 218 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
215 #mbox-cells = <1>; 219 #mbox-cells = <1>;
220 dma-coherent;
216 brcm,rx-status-len = <32>; 221 brcm,rx-status-len = <32>;
217 brcm,use-bcm-hdr; 222 brcm,use-bcm-hdr;
218 }; 223 };
@@ -222,6 +227,7 @@
222 reg = <0x612e0000 0x445>; /* PDC FS1 regs */ 227 reg = <0x612e0000 0x445>; /* PDC FS1 regs */
223 interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; 228 interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
224 #mbox-cells = <1>; 229 #mbox-cells = <1>;
230 dma-coherent;
225 brcm,rx-status-len = <32>; 231 brcm,rx-status-len = <32>;
226 brcm,use-bcm-hdr; 232 brcm,use-bcm-hdr;
227 }; 233 };
@@ -231,6 +237,7 @@
231 reg = <0x61300000 0x445>; /* PDC FS2 regs */ 237 reg = <0x61300000 0x445>; /* PDC FS2 regs */
232 interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; 238 interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
233 #mbox-cells = <1>; 239 #mbox-cells = <1>;
240 dma-coherent;
234 brcm,rx-status-len = <32>; 241 brcm,rx-status-len = <32>;
235 brcm,use-bcm-hdr; 242 brcm,use-bcm-hdr;
236 }; 243 };
@@ -240,6 +247,7 @@
240 reg = <0x61320000 0x445>; /* PDC FS3 regs */ 247 reg = <0x61320000 0x445>; /* PDC FS3 regs */
241 interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; 248 interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
242 #mbox-cells = <1>; 249 #mbox-cells = <1>;
250 dma-coherent;
243 brcm,rx-status-len = <32>; 251 brcm,rx-status-len = <32>;
244 brcm,use-bcm-hdr; 252 brcm,use-bcm-hdr;
245 }; 253 };
@@ -644,6 +652,7 @@
644 sata: ahci@663f2000 { 652 sata: ahci@663f2000 {
645 compatible = "brcm,iproc-ahci", "generic-ahci"; 653 compatible = "brcm,iproc-ahci", "generic-ahci";
646 reg = <0x663f2000 0x1000>; 654 reg = <0x663f2000 0x1000>;
655 dma-coherent;
647 reg-names = "ahci"; 656 reg-names = "ahci";
648 interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; 657 interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
649 #address-cells = <1>; 658 #address-cells = <1>;
@@ -667,6 +676,7 @@
667 compatible = "brcm,sdhci-iproc-cygnus"; 676 compatible = "brcm,sdhci-iproc-cygnus";
668 reg = <0x66420000 0x100>; 677 reg = <0x66420000 0x100>;
669 interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>; 678 interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
679 dma-coherent;
670 bus-width = <8>; 680 bus-width = <8>;
671 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; 681 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
672 status = "disabled"; 682 status = "disabled";
@@ -676,6 +686,7 @@
676 compatible = "brcm,sdhci-iproc-cygnus"; 686 compatible = "brcm,sdhci-iproc-cygnus";
677 reg = <0x66430000 0x100>; 687 reg = <0x66430000 0x100>;
678 interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>; 688 interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
689 dma-coherent;
679 bus-width = <8>; 690 bus-width = <8>;
680 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; 691 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
681 status = "disabled"; 692 status = "disabled";
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index 86c404171305..f6580d4afb0e 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5 5
6#include <asm/sysreg.h>
7
8#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
9 7
10struct task_struct; 8struct task_struct;
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26324bd..bdbeb06dc11e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) 44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) 45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
46 46
47#define __NR_compat_syscalls 394 47#define __NR_compat_syscalls 398
48#endif 48#endif
49 49
50#define __ARCH_WANT_SYS_CLONE 50#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef16ff0d..c66b51aab195 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
809__SYSCALL(__NR_preadv2, compat_sys_preadv2) 809__SYSCALL(__NR_preadv2, compat_sys_preadv2)
810#define __NR_pwritev2 393 810#define __NR_pwritev2 393
811__SYSCALL(__NR_pwritev2, compat_sys_pwritev2) 811__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
812#define __NR_pkey_mprotect 394
813__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
814#define __NR_pkey_alloc 395
815__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
816#define __NR_pkey_free 396
817__SYSCALL(__NR_pkey_free, sys_pkey_free)
818#define __NR_statx 397
819__SYSCALL(__NR_statx, sys_statx)
812 820
813/* 821/*
814 * Please add new compat syscalls above this comment and update 822 * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
131 /* 131 /*
132 * The kernel Image should not extend across a 1GB/32MB/512MB alignment 132 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
133 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this 133 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
134 * happens, increase the KASLR offset by the size of the kernel image. 134 * happens, increase the KASLR offset by the size of the kernel image
135 * rounded up by SWAPPER_BLOCK_SIZE.
135 */ 136 */
136 if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != 137 if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
137 (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) 138 (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
138 offset = (offset + (u64)(_end - _text)) & mask; 139 u64 kimg_sz = _end - _text;
140 offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
141 & mask;
142 }
139 143
140 if (IS_ENABLED(CONFIG_KASAN)) 144 if (IS_ENABLED(CONFIG_KASAN))
141 /* 145 /*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ef1caae02110..9b1036570586 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -944,7 +944,7 @@ static bool have_cpu_die(void)
944#ifdef CONFIG_HOTPLUG_CPU 944#ifdef CONFIG_HOTPLUG_CPU
945 int any_cpu = raw_smp_processor_id(); 945 int any_cpu = raw_smp_processor_id();
946 946
947 if (cpu_ops[any_cpu]->cpu_die) 947 if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
948 return true; 948 return true;
949#endif 949#endif
950 return false; 950 return false;
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index b8cc94e9698b..f8b69d84238e 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1,2 +1 @@
1vdso.lds vdso.lds
2vdso-offsets.h
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4bf899fb451b..1b35b8bddbfb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -42,7 +42,20 @@
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/tlbflush.h> 43#include <asm/tlbflush.h>
44 44
45static const char *fault_name(unsigned int esr); 45struct fault_info {
46 int (*fn)(unsigned long addr, unsigned int esr,
47 struct pt_regs *regs);
48 int sig;
49 int code;
50 const char *name;
51};
52
53static const struct fault_info fault_info[];
54
55static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
56{
57 return fault_info + (esr & 63);
58}
46 59
47#ifdef CONFIG_KPROBES 60#ifdef CONFIG_KPROBES
48static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) 61static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
197 struct pt_regs *regs) 210 struct pt_regs *regs)
198{ 211{
199 struct siginfo si; 212 struct siginfo si;
213 const struct fault_info *inf;
200 214
201 if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) { 215 if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
216 inf = esr_to_fault_info(esr);
202 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", 217 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
203 tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, 218 tsk->comm, task_pid_nr(tsk), inf->name, sig,
204 addr, esr); 219 addr, esr);
205 show_pte(tsk->mm, addr); 220 show_pte(tsk->mm, addr);
206 show_regs(regs); 221 show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
219{ 234{
220 struct task_struct *tsk = current; 235 struct task_struct *tsk = current;
221 struct mm_struct *mm = tsk->active_mm; 236 struct mm_struct *mm = tsk->active_mm;
237 const struct fault_info *inf;
222 238
223 /* 239 /*
224 * If we are in kernel mode at this point, we have no context to 240 * If we are in kernel mode at this point, we have no context to
225 * handle this fault with. 241 * handle this fault with.
226 */ 242 */
227 if (user_mode(regs)) 243 if (user_mode(regs)) {
228 __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs); 244 inf = esr_to_fault_info(esr);
229 else 245 __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
246 } else
230 __do_kernel_fault(mm, addr, esr, regs); 247 __do_kernel_fault(mm, addr, esr, regs);
231} 248}
232 249
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
488 return 1; 505 return 1;
489} 506}
490 507
491static const struct fault_info { 508static const struct fault_info fault_info[] = {
492 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
493 int sig;
494 int code;
495 const char *name;
496} fault_info[] = {
497 { do_bad, SIGBUS, 0, "ttbr address size fault" }, 509 { do_bad, SIGBUS, 0, "ttbr address size fault" },
498 { do_bad, SIGBUS, 0, "level 1 address size fault" }, 510 { do_bad, SIGBUS, 0, "level 1 address size fault" },
499 { do_bad, SIGBUS, 0, "level 2 address size fault" }, 511 { do_bad, SIGBUS, 0, "level 2 address size fault" },
@@ -560,19 +572,13 @@ static const struct fault_info {
560 { do_bad, SIGBUS, 0, "unknown 63" }, 572 { do_bad, SIGBUS, 0, "unknown 63" },
561}; 573};
562 574
563static const char *fault_name(unsigned int esr)
564{
565 const struct fault_info *inf = fault_info + (esr & 63);
566 return inf->name;
567}
568
569/* 575/*
570 * Dispatch a data abort to the relevant handler. 576 * Dispatch a data abort to the relevant handler.
571 */ 577 */
572asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, 578asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
573 struct pt_regs *regs) 579 struct pt_regs *regs)
574{ 580{
575 const struct fault_info *inf = fault_info + (esr & 63); 581 const struct fault_info *inf = esr_to_fault_info(esr);
576 struct siginfo info; 582 struct siginfo info;
577 583
578 if (!inf->fn(addr, esr, regs)) 584 if (!inf->fn(addr, esr, regs))
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e25584d72396..7514a000e361 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
294 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); 294 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
295 } else if (ps == PUD_SIZE) { 295 } else if (ps == PUD_SIZE) {
296 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 296 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
297 } else if (ps == (PAGE_SIZE * CONT_PTES)) {
298 hugetlb_add_hstate(CONT_PTE_SHIFT);
299 } else if (ps == (PMD_SIZE * CONT_PMDS)) {
300 hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
301 } else { 297 } else {
302 hugetlb_bad_size(); 298 hugetlb_bad_size();
303 pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); 299 pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
306 return 1; 302 return 1;
307} 303}
308__setup("hugepagesz=", setup_hugepagesz); 304__setup("hugepagesz=", setup_hugepagesz);
309
310#ifdef CONFIG_ARM64_64K_PAGES
311static __init int add_default_hugepagesz(void)
312{
313 if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
314 hugetlb_add_hstate(CONT_PTE_SHIFT);
315 return 0;
316}
317arch_initcall(add_default_hugepagesz);
318#endif
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index a27e1f02ce18..8801dc98fd44 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
70 0, sizeof(*regs)); 70 0, sizeof(*regs));
71} 71}
72 72
73static int gpr_set(struct task_struct *target,
74 const struct user_regset *regset,
75 unsigned int pos, unsigned int count,
76 const void *kbuf, const void __user *ubuf)
77{
78 int ret;
79 struct pt_regs *regs = task_pt_regs(target);
80
81 /* Don't copyin TSR or CSR */
82 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
83 &regs,
84 0, PT_TSR * sizeof(long));
85 if (ret)
86 return ret;
87
88 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
89 PT_TSR * sizeof(long),
90 (PT_TSR + 1) * sizeof(long));
91 if (ret)
92 return ret;
93
94 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
95 &regs,
96 (PT_TSR + 1) * sizeof(long),
97 PT_CSR * sizeof(long));
98 if (ret)
99 return ret;
100
101 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
102 PT_CSR * sizeof(long),
103 (PT_CSR + 1) * sizeof(long));
104 if (ret)
105 return ret;
106
107 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
108 &regs,
109 (PT_CSR + 1) * sizeof(long), -1);
110 return ret;
111}
112
113enum c6x_regset { 73enum c6x_regset {
114 REGSET_GPR, 74 REGSET_GPR,
115}; 75};
@@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
121 .size = sizeof(u32), 81 .size = sizeof(u32),
122 .align = sizeof(u32), 82 .align = sizeof(u32),
123 .get = gpr_get, 83 .get = gpr_get,
124 .set = gpr_set
125 }, 84 },
126}; 85};
127 86
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 92075544a19a..0dc1c8f622bc 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
95 long *reg = (long *)&regs; 95 long *reg = (long *)&regs;
96 96
97 /* build user regs in buffer */ 97 /* build user regs in buffer */
98 for (r = 0; r < ARRAY_SIZE(register_offset); r++) 98 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
99 for (r = 0; r < sizeof(regs) / sizeof(long); r++)
99 *reg++ = h8300_get_reg(target, r); 100 *reg++ = h8300_get_reg(target, r);
100 101
101 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 102 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
113 long *reg; 114 long *reg;
114 115
115 /* build user regs in buffer */ 116 /* build user regs in buffer */
116 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 117 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
118 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
117 *reg++ = h8300_get_reg(target, r); 119 *reg++ = h8300_get_reg(target, r);
118 120
119 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 121 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
122 return ret; 124 return ret;
123 125
124 /* write back to pt_regs */ 126 /* write back to pt_regs */
125 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 127 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
126 h8300_put_reg(target, r, *reg++); 128 h8300_put_reg(target, r, *reg++);
127 return 0; 129 return 0;
128} 130}
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..a2c139808cfe
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
1#ifndef _ASM_IA64_ASM_PROTOTYPES_H
2#define _ASM_IA64_ASM_PROTOTYPES_H
3
4#include <asm/cacheflush.h>
5#include <asm/checksum.h>
6#include <asm/esi.h>
7#include <asm/ftrace.h>
8#include <asm/page.h>
9#include <asm/pal.h>
10#include <asm/string.h>
11#include <asm/uaccess.h>
12#include <asm/unwind.h>
13#include <asm/xor.h>
14
15extern const char ia64_ivt[];
16
17signed int __divsi3(signed int, unsigned int);
18signed int __modsi3(signed int, unsigned int);
19
20signed long long __divdi3(signed long long, unsigned long long);
21signed long long __moddi3(signed long long, unsigned long long);
22
23unsigned int __udivsi3(unsigned int, unsigned int);
24unsigned int __umodsi3(unsigned int, unsigned int);
25
26unsigned long long __udivdi3(unsigned long long, unsigned long long);
27unsigned long long __umoddi3(unsigned long long, unsigned long long);
28
29#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 1f3d3877618f..0a40b14407b1 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
24AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO 24AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
25 25
26$(obj)/__divdi3.o: $(src)/idiv64.S FORCE 26$(obj)/__divdi3.o: $(src)/idiv64.S FORCE
27 $(call if_changed_dep,as_o_S) 27 $(call if_changed_rule,as_o_S)
28 28
29$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE 29$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
30 $(call if_changed_dep,as_o_S) 30 $(call if_changed_rule,as_o_S)
31 31
32$(obj)/__moddi3.o: $(src)/idiv64.S FORCE 32$(obj)/__moddi3.o: $(src)/idiv64.S FORCE
33 $(call if_changed_dep,as_o_S) 33 $(call if_changed_rule,as_o_S)
34 34
35$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE 35$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
36 $(call if_changed_dep,as_o_S) 36 $(call if_changed_rule,as_o_S)
37 37
38$(obj)/__divsi3.o: $(src)/idiv32.S FORCE 38$(obj)/__divsi3.o: $(src)/idiv32.S FORCE
39 $(call if_changed_dep,as_o_S) 39 $(call if_changed_rule,as_o_S)
40 40
41$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE 41$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
42 $(call if_changed_dep,as_o_S) 42 $(call if_changed_rule,as_o_S)
43 43
44$(obj)/__modsi3.o: $(src)/idiv32.S FORCE 44$(obj)/__modsi3.o: $(src)/idiv32.S FORCE
45 $(call if_changed_dep,as_o_S) 45 $(call if_changed_rule,as_o_S)
46 46
47$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE 47$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
48 $(call if_changed_dep,as_o_S) 48 $(call if_changed_rule,as_o_S)
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf076f7df..531cb9eb3319 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
60CONFIG_NET_FOU_IP_TUNNELS=y 61CONFIG_NET_FOU_IP_TUNNELS=y
61CONFIG_INET_AH=m 62CONFIG_INET_AH=m
62CONFIG_INET_ESP=m 63CONFIG_INET_ESP=m
64CONFIG_INET_ESP_OFFLOAD=m
63CONFIG_INET_IPCOMP=m 65CONFIG_INET_IPCOMP=m
64CONFIG_INET_XFRM_MODE_TRANSPORT=m 66CONFIG_INET_XFRM_MODE_TRANSPORT=m
65CONFIG_INET_XFRM_MODE_TUNNEL=m 67CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
71CONFIG_IPV6_ROUTER_PREF=y 73CONFIG_IPV6_ROUTER_PREF=y
72CONFIG_INET6_AH=m 74CONFIG_INET6_AH=m
73CONFIG_INET6_ESP=m 75CONFIG_INET6_ESP=m
76CONFIG_INET6_ESP_OFFLOAD=m
74CONFIG_INET6_IPCOMP=m 77CONFIG_INET6_IPCOMP=m
75CONFIG_IPV6_ILA=m 78CONFIG_IPV6_ILA=m
76CONFIG_IPV6_VTI=m 79CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
101CONFIG_NFT_CT=m 104CONFIG_NFT_CT=m
102CONFIG_NFT_SET_RBTREE=m 105CONFIG_NFT_SET_RBTREE=m
103CONFIG_NFT_SET_HASH=m 106CONFIG_NFT_SET_HASH=m
107CONFIG_NFT_SET_BITMAP=m
104CONFIG_NFT_COUNTER=m 108CONFIG_NFT_COUNTER=m
105CONFIG_NFT_LOG=m 109CONFIG_NFT_LOG=m
106CONFIG_NFT_LIMIT=m 110CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
298CONFIG_NET_L3_MASTER_DEV=y 302CONFIG_NET_L3_MASTER_DEV=y
299CONFIG_AF_KCM=m 303CONFIG_AF_KCM=m
300# CONFIG_WIRELESS is not set 304# CONFIG_WIRELESS is not set
305CONFIG_PSAMPLE=m
306CONFIG_NET_IFE=m
301CONFIG_NET_DEVLINK=m 307CONFIG_NET_DEVLINK=m
302# CONFIG_UEVENT_HELPER is not set 308# CONFIG_UEVENT_HELPER is not set
303CONFIG_DEVTMPFS=y 309CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
371CONFIG_MACVLAN=m 377CONFIG_MACVLAN=m
372CONFIG_MACVTAP=m 378CONFIG_MACVTAP=m
373CONFIG_IPVLAN=m 379CONFIG_IPVLAN=m
380CONFIG_IPVTAP=m
374CONFIG_VXLAN=m 381CONFIG_VXLAN=m
375CONFIG_GENEVE=m 382CONFIG_GENEVE=m
376CONFIG_GTP=m 383CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
383# CONFIG_NET_VENDOR_AMAZON is not set 390# CONFIG_NET_VENDOR_AMAZON is not set
384CONFIG_A2065=y 391CONFIG_A2065=y
385CONFIG_ARIADNE=y 392CONFIG_ARIADNE=y
393# CONFIG_NET_VENDOR_AQUANTIA is not set
386# CONFIG_NET_VENDOR_ARC is not set 394# CONFIG_NET_VENDOR_ARC is not set
387# CONFIG_NET_CADENCE is not set 395# CONFIG_NET_CADENCE is not set
388# CONFIG_NET_VENDOR_BROADCOM is not set 396# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
404# CONFIG_NET_VENDOR_SOLARFLARE is not set 412# CONFIG_NET_VENDOR_SOLARFLARE is not set
405# CONFIG_NET_VENDOR_SMSC is not set 413# CONFIG_NET_VENDOR_SMSC is not set
406# CONFIG_NET_VENDOR_STMICRO is not set 414# CONFIG_NET_VENDOR_STMICRO is not set
407# CONFIG_NET_VENDOR_SYNOPSYS is not set
408# CONFIG_NET_VENDOR_VIA is not set 415# CONFIG_NET_VENDOR_VIA is not set
409# CONFIG_NET_VENDOR_WIZNET is not set 416# CONFIG_NET_VENDOR_WIZNET is not set
410CONFIG_PPP=m 417CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
564CONFIG_DLM=m 571CONFIG_DLM=m
565# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 572# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
566CONFIG_MAGIC_SYSRQ=y 573CONFIG_MAGIC_SYSRQ=y
574CONFIG_WW_MUTEX_SELFTEST=m
575CONFIG_ATOMIC64_SELFTEST=m
567CONFIG_ASYNC_RAID6_TEST=m 576CONFIG_ASYNC_RAID6_TEST=m
568CONFIG_TEST_HEXDUMP=m 577CONFIG_TEST_HEXDUMP=m
569CONFIG_TEST_STRING_HELPERS=m 578CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
594CONFIG_CRYPTO_LRW=m 603CONFIG_CRYPTO_LRW=m
595CONFIG_CRYPTO_PCBC=m 604CONFIG_CRYPTO_PCBC=m
596CONFIG_CRYPTO_KEYWRAP=m 605CONFIG_CRYPTO_KEYWRAP=m
606CONFIG_CRYPTO_CMAC=m
597CONFIG_CRYPTO_XCBC=m 607CONFIG_CRYPTO_XCBC=m
598CONFIG_CRYPTO_VMAC=m 608CONFIG_CRYPTO_VMAC=m
599CONFIG_CRYPTO_MICHAEL_MIC=m 609CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
605CONFIG_CRYPTO_SHA3=m 615CONFIG_CRYPTO_SHA3=m
606CONFIG_CRYPTO_TGR192=m 616CONFIG_CRYPTO_TGR192=m
607CONFIG_CRYPTO_WP512=m 617CONFIG_CRYPTO_WP512=m
618CONFIG_CRYPTO_AES_TI=m
608CONFIG_CRYPTO_ANUBIS=m 619CONFIG_CRYPTO_ANUBIS=m
609CONFIG_CRYPTO_BLOWFISH=m 620CONFIG_CRYPTO_BLOWFISH=m
610CONFIG_CRYPTO_CAMELLIA=m 621CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
629CONFIG_CRYPTO_USER_API_RNG=m 640CONFIG_CRYPTO_USER_API_RNG=m
630CONFIG_CRYPTO_USER_API_AEAD=m 641CONFIG_CRYPTO_USER_API_AEAD=m
631# CONFIG_CRYPTO_HW is not set 642# CONFIG_CRYPTO_HW is not set
643CONFIG_CRC32_SELFTEST=m
632CONFIG_XZ_DEC_TEST=m 644CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de24963f5f..ca91d39555da 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68020=y 32CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
353CONFIG_MACVLAN=m 359CONFIG_MACVLAN=m
354CONFIG_MACVTAP=m 360CONFIG_MACVTAP=m
355CONFIG_IPVLAN=m 361CONFIG_IPVLAN=m
362CONFIG_IPVTAP=m
356CONFIG_VXLAN=m 363CONFIG_VXLAN=m
357CONFIG_GENEVE=m 364CONFIG_GENEVE=m
358CONFIG_GTP=m 365CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
362CONFIG_VETH=m 369CONFIG_VETH=m
363# CONFIG_NET_VENDOR_ALACRITECH is not set 370# CONFIG_NET_VENDOR_ALACRITECH is not set
364# CONFIG_NET_VENDOR_AMAZON is not set 371# CONFIG_NET_VENDOR_AMAZON is not set
372# CONFIG_NET_VENDOR_AQUANTIA is not set
365# CONFIG_NET_VENDOR_ARC is not set 373# CONFIG_NET_VENDOR_ARC is not set
366# CONFIG_NET_CADENCE is not set 374# CONFIG_NET_CADENCE is not set
367# CONFIG_NET_VENDOR_BROADCOM is not set 375# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
378# CONFIG_NET_VENDOR_SEEQ is not set 386# CONFIG_NET_VENDOR_SEEQ is not set
379# CONFIG_NET_VENDOR_SOLARFLARE is not set 387# CONFIG_NET_VENDOR_SOLARFLARE is not set
380# CONFIG_NET_VENDOR_STMICRO is not set 388# CONFIG_NET_VENDOR_STMICRO is not set
381# CONFIG_NET_VENDOR_SYNOPSYS is not set
382# CONFIG_NET_VENDOR_VIA is not set 389# CONFIG_NET_VENDOR_VIA is not set
383# CONFIG_NET_VENDOR_WIZNET is not set 390# CONFIG_NET_VENDOR_WIZNET is not set
384CONFIG_PPP=m 391CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
523CONFIG_DLM=m 530CONFIG_DLM=m
524# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 531# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
525CONFIG_MAGIC_SYSRQ=y 532CONFIG_MAGIC_SYSRQ=y
533CONFIG_WW_MUTEX_SELFTEST=m
534CONFIG_ATOMIC64_SELFTEST=m
526CONFIG_ASYNC_RAID6_TEST=m 535CONFIG_ASYNC_RAID6_TEST=m
527CONFIG_TEST_HEXDUMP=m 536CONFIG_TEST_HEXDUMP=m
528CONFIG_TEST_STRING_HELPERS=m 537CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
553CONFIG_CRYPTO_LRW=m 562CONFIG_CRYPTO_LRW=m
554CONFIG_CRYPTO_PCBC=m 563CONFIG_CRYPTO_PCBC=m
555CONFIG_CRYPTO_KEYWRAP=m 564CONFIG_CRYPTO_KEYWRAP=m
565CONFIG_CRYPTO_CMAC=m
556CONFIG_CRYPTO_XCBC=m 566CONFIG_CRYPTO_XCBC=m
557CONFIG_CRYPTO_VMAC=m 567CONFIG_CRYPTO_VMAC=m
558CONFIG_CRYPTO_MICHAEL_MIC=m 568CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
564CONFIG_CRYPTO_SHA3=m 574CONFIG_CRYPTO_SHA3=m
565CONFIG_CRYPTO_TGR192=m 575CONFIG_CRYPTO_TGR192=m
566CONFIG_CRYPTO_WP512=m 576CONFIG_CRYPTO_WP512=m
577CONFIG_CRYPTO_AES_TI=m
567CONFIG_CRYPTO_ANUBIS=m 578CONFIG_CRYPTO_ANUBIS=m
568CONFIG_CRYPTO_BLOWFISH=m 579CONFIG_CRYPTO_BLOWFISH=m
569CONFIG_CRYPTO_CAMELLIA=m 580CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
588CONFIG_CRYPTO_USER_API_RNG=m 599CONFIG_CRYPTO_USER_API_RNG=m
589CONFIG_CRYPTO_USER_API_AEAD=m 600CONFIG_CRYPTO_USER_API_AEAD=m
590# CONFIG_CRYPTO_HW is not set 601# CONFIG_CRYPTO_HW is not set
602CONFIG_CRC32_SELFTEST=m
591CONFIG_XZ_DEC_TEST=m 603CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f871f3..23a3d8a691e2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
362CONFIG_MACVLAN=m 368CONFIG_MACVLAN=m
363CONFIG_MACVTAP=m 369CONFIG_MACVTAP=m
364CONFIG_IPVLAN=m 370CONFIG_IPVLAN=m
371CONFIG_IPVTAP=m
365CONFIG_VXLAN=m 372CONFIG_VXLAN=m
366CONFIG_GENEVE=m 373CONFIG_GENEVE=m
367CONFIG_GTP=m 374CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
372# CONFIG_NET_VENDOR_ALACRITECH is not set 379# CONFIG_NET_VENDOR_ALACRITECH is not set
373# CONFIG_NET_VENDOR_AMAZON is not set 380# CONFIG_NET_VENDOR_AMAZON is not set
374CONFIG_ATARILANCE=y 381CONFIG_ATARILANCE=y
382# CONFIG_NET_VENDOR_AQUANTIA is not set
375# CONFIG_NET_VENDOR_ARC is not set 383# CONFIG_NET_VENDOR_ARC is not set
376# CONFIG_NET_CADENCE is not set 384# CONFIG_NET_CADENCE is not set
377# CONFIG_NET_VENDOR_BROADCOM is not set 385# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
389# CONFIG_NET_VENDOR_SOLARFLARE is not set 397# CONFIG_NET_VENDOR_SOLARFLARE is not set
390CONFIG_SMC91X=y 398CONFIG_SMC91X=y
391# CONFIG_NET_VENDOR_STMICRO is not set 399# CONFIG_NET_VENDOR_STMICRO is not set
392# CONFIG_NET_VENDOR_SYNOPSYS is not set
393# CONFIG_NET_VENDOR_VIA is not set 400# CONFIG_NET_VENDOR_VIA is not set
394# CONFIG_NET_VENDOR_WIZNET is not set 401# CONFIG_NET_VENDOR_WIZNET is not set
395CONFIG_PPP=m 402CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
544CONFIG_DLM=m 551CONFIG_DLM=m
545# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 552# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
546CONFIG_MAGIC_SYSRQ=y 553CONFIG_MAGIC_SYSRQ=y
554CONFIG_WW_MUTEX_SELFTEST=m
555CONFIG_ATOMIC64_SELFTEST=m
547CONFIG_ASYNC_RAID6_TEST=m 556CONFIG_ASYNC_RAID6_TEST=m
548CONFIG_TEST_HEXDUMP=m 557CONFIG_TEST_HEXDUMP=m
549CONFIG_TEST_STRING_HELPERS=m 558CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
574CONFIG_CRYPTO_LRW=m 583CONFIG_CRYPTO_LRW=m
575CONFIG_CRYPTO_PCBC=m 584CONFIG_CRYPTO_PCBC=m
576CONFIG_CRYPTO_KEYWRAP=m 585CONFIG_CRYPTO_KEYWRAP=m
586CONFIG_CRYPTO_CMAC=m
577CONFIG_CRYPTO_XCBC=m 587CONFIG_CRYPTO_XCBC=m
578CONFIG_CRYPTO_VMAC=m 588CONFIG_CRYPTO_VMAC=m
579CONFIG_CRYPTO_MICHAEL_MIC=m 589CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
585CONFIG_CRYPTO_SHA3=m 595CONFIG_CRYPTO_SHA3=m
586CONFIG_CRYPTO_TGR192=m 596CONFIG_CRYPTO_TGR192=m
587CONFIG_CRYPTO_WP512=m 597CONFIG_CRYPTO_WP512=m
598CONFIG_CRYPTO_AES_TI=m
588CONFIG_CRYPTO_ANUBIS=m 599CONFIG_CRYPTO_ANUBIS=m
589CONFIG_CRYPTO_BLOWFISH=m 600CONFIG_CRYPTO_BLOWFISH=m
590CONFIG_CRYPTO_CAMELLIA=m 601CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
609CONFIG_CRYPTO_USER_API_RNG=m 620CONFIG_CRYPTO_USER_API_RNG=m
610CONFIG_CRYPTO_USER_API_AEAD=m 621CONFIG_CRYPTO_USER_API_AEAD=m
611# CONFIG_CRYPTO_HW is not set 622# CONFIG_CRYPTO_HW is not set
623CONFIG_CRC32_SELFTEST=m
612CONFIG_XZ_DEC_TEST=m 624CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a0aa69..95deb95140fe 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68040=y 31CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
352CONFIG_MACVLAN=m 358CONFIG_MACVLAN=m
353CONFIG_MACVTAP=m 359CONFIG_MACVTAP=m
354CONFIG_IPVLAN=m 360CONFIG_IPVLAN=m
361CONFIG_IPVTAP=m
355CONFIG_VXLAN=m 362CONFIG_VXLAN=m
356CONFIG_GENEVE=m 363CONFIG_GENEVE=m
357CONFIG_GTP=m 364CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
361CONFIG_VETH=m 368CONFIG_VETH=m
362# CONFIG_NET_VENDOR_ALACRITECH is not set 369# CONFIG_NET_VENDOR_ALACRITECH is not set
363# CONFIG_NET_VENDOR_AMAZON is not set 370# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed4422cc9..afae6958db2d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68020=y 32CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
353CONFIG_MACVLAN=m 359CONFIG_MACVLAN=m
354CONFIG_MACVTAP=m 360CONFIG_MACVTAP=m
355CONFIG_IPVLAN=m 361CONFIG_IPVLAN=m
362CONFIG_IPVTAP=m
356CONFIG_VXLAN=m 363CONFIG_VXLAN=m
357CONFIG_GENEVE=m 364CONFIG_GENEVE=m
358CONFIG_GTP=m 365CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
363# CONFIG_NET_VENDOR_ALACRITECH is not set 370# CONFIG_NET_VENDOR_ALACRITECH is not set
364# CONFIG_NET_VENDOR_AMAZON is not set 371# CONFIG_NET_VENDOR_AMAZON is not set
365CONFIG_HPLANCE=y 372CONFIG_HPLANCE=y
373# CONFIG_NET_VENDOR_AQUANTIA is not set
366# CONFIG_NET_VENDOR_ARC is not set 374# CONFIG_NET_VENDOR_ARC is not set
367# CONFIG_NET_CADENCE is not set 375# CONFIG_NET_CADENCE is not set
368# CONFIG_NET_VENDOR_BROADCOM is not set 376# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
379# CONFIG_NET_VENDOR_SEEQ is not set 387# CONFIG_NET_VENDOR_SEEQ is not set
380# CONFIG_NET_VENDOR_SOLARFLARE is not set 388# CONFIG_NET_VENDOR_SOLARFLARE is not set
381# CONFIG_NET_VENDOR_STMICRO is not set 389# CONFIG_NET_VENDOR_STMICRO is not set
382# CONFIG_NET_VENDOR_SYNOPSYS is not set
383# CONFIG_NET_VENDOR_VIA is not set 390# CONFIG_NET_VENDOR_VIA is not set
384# CONFIG_NET_VENDOR_WIZNET is not set 391# CONFIG_NET_VENDOR_WIZNET is not set
385CONFIG_PPP=m 392CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
525CONFIG_DLM=m 532CONFIG_DLM=m
526# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 533# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
527CONFIG_MAGIC_SYSRQ=y 534CONFIG_MAGIC_SYSRQ=y
535CONFIG_WW_MUTEX_SELFTEST=m
536CONFIG_ATOMIC64_SELFTEST=m
528CONFIG_ASYNC_RAID6_TEST=m 537CONFIG_ASYNC_RAID6_TEST=m
529CONFIG_TEST_HEXDUMP=m 538CONFIG_TEST_HEXDUMP=m
530CONFIG_TEST_STRING_HELPERS=m 539CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
555CONFIG_CRYPTO_LRW=m 564CONFIG_CRYPTO_LRW=m
556CONFIG_CRYPTO_PCBC=m 565CONFIG_CRYPTO_PCBC=m
557CONFIG_CRYPTO_KEYWRAP=m 566CONFIG_CRYPTO_KEYWRAP=m
567CONFIG_CRYPTO_CMAC=m
558CONFIG_CRYPTO_XCBC=m 568CONFIG_CRYPTO_XCBC=m
559CONFIG_CRYPTO_VMAC=m 569CONFIG_CRYPTO_VMAC=m
560CONFIG_CRYPTO_MICHAEL_MIC=m 570CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
566CONFIG_CRYPTO_SHA3=m 576CONFIG_CRYPTO_SHA3=m
567CONFIG_CRYPTO_TGR192=m 577CONFIG_CRYPTO_TGR192=m
568CONFIG_CRYPTO_WP512=m 578CONFIG_CRYPTO_WP512=m
579CONFIG_CRYPTO_AES_TI=m
569CONFIG_CRYPTO_ANUBIS=m 580CONFIG_CRYPTO_ANUBIS=m
570CONFIG_CRYPTO_BLOWFISH=m 581CONFIG_CRYPTO_BLOWFISH=m
571CONFIG_CRYPTO_CAMELLIA=m 582CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
590CONFIG_CRYPTO_USER_API_RNG=m 601CONFIG_CRYPTO_USER_API_RNG=m
591CONFIG_CRYPTO_USER_API_AEAD=m 602CONFIG_CRYPTO_USER_API_AEAD=m
592# CONFIG_CRYPTO_HW is not set 603# CONFIG_CRYPTO_HW is not set
604CONFIG_CRC32_SELFTEST=m
593CONFIG_XZ_DEC_TEST=m 605CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2f0dac..b010734729a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y 58CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_INET_AH=m 59CONFIG_INET_AH=m
59CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
61CONFIG_INET_ESP_OFFLOAD=m
60CONFIG_INET_IPCOMP=m 62CONFIG_INET_IPCOMP=m
61CONFIG_INET_XFRM_MODE_TRANSPORT=m 63CONFIG_INET_XFRM_MODE_TRANSPORT=m
62CONFIG_INET_XFRM_MODE_TUNNEL=m 64CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
68CONFIG_IPV6_ROUTER_PREF=y 70CONFIG_IPV6_ROUTER_PREF=y
69CONFIG_INET6_AH=m 71CONFIG_INET6_AH=m
70CONFIG_INET6_ESP=m 72CONFIG_INET6_ESP=m
73CONFIG_INET6_ESP_OFFLOAD=m
71CONFIG_INET6_IPCOMP=m 74CONFIG_INET6_IPCOMP=m
72CONFIG_IPV6_ILA=m 75CONFIG_IPV6_ILA=m
73CONFIG_IPV6_VTI=m 76CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
98CONFIG_NFT_CT=m 101CONFIG_NFT_CT=m
99CONFIG_NFT_SET_RBTREE=m 102CONFIG_NFT_SET_RBTREE=m
100CONFIG_NFT_SET_HASH=m 103CONFIG_NFT_SET_HASH=m
104CONFIG_NFT_SET_BITMAP=m
101CONFIG_NFT_COUNTER=m 105CONFIG_NFT_COUNTER=m
102CONFIG_NFT_LOG=m 106CONFIG_NFT_LOG=m
103CONFIG_NFT_LIMIT=m 107CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
298CONFIG_NET_L3_MASTER_DEV=y 302CONFIG_NET_L3_MASTER_DEV=y
299CONFIG_AF_KCM=m 303CONFIG_AF_KCM=m
300# CONFIG_WIRELESS is not set 304# CONFIG_WIRELESS is not set
305CONFIG_PSAMPLE=m
306CONFIG_NET_IFE=m
301CONFIG_NET_DEVLINK=m 307CONFIG_NET_DEVLINK=m
302# CONFIG_UEVENT_HELPER is not set 308# CONFIG_UEVENT_HELPER is not set
303CONFIG_DEVTMPFS=y 309CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
369CONFIG_MACVLAN=m 375CONFIG_MACVLAN=m
370CONFIG_MACVTAP=m 376CONFIG_MACVTAP=m
371CONFIG_IPVLAN=m 377CONFIG_IPVLAN=m
378CONFIG_IPVTAP=m
372CONFIG_VXLAN=m 379CONFIG_VXLAN=m
373CONFIG_GENEVE=m 380CONFIG_GENEVE=m
374CONFIG_GTP=m 381CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
379# CONFIG_NET_VENDOR_ALACRITECH is not set 386# CONFIG_NET_VENDOR_ALACRITECH is not set
380# CONFIG_NET_VENDOR_AMAZON is not set 387# CONFIG_NET_VENDOR_AMAZON is not set
381CONFIG_MACMACE=y 388CONFIG_MACMACE=y
389# CONFIG_NET_VENDOR_AQUANTIA is not set
382# CONFIG_NET_VENDOR_ARC is not set 390# CONFIG_NET_VENDOR_ARC is not set
383# CONFIG_NET_CADENCE is not set 391# CONFIG_NET_CADENCE is not set
384# CONFIG_NET_VENDOR_BROADCOM is not set 392# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
398# CONFIG_NET_VENDOR_SOLARFLARE is not set 406# CONFIG_NET_VENDOR_SOLARFLARE is not set
399# CONFIG_NET_VENDOR_SMSC is not set 407# CONFIG_NET_VENDOR_SMSC is not set
400# CONFIG_NET_VENDOR_STMICRO is not set 408# CONFIG_NET_VENDOR_STMICRO is not set
401# CONFIG_NET_VENDOR_SYNOPSYS is not set
402# CONFIG_NET_VENDOR_VIA is not set 409# CONFIG_NET_VENDOR_VIA is not set
403# CONFIG_NET_VENDOR_WIZNET is not set 410# CONFIG_NET_VENDOR_WIZNET is not set
404CONFIG_PPP=m 411CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
547CONFIG_DLM=m 554CONFIG_DLM=m
548# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 555# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
549CONFIG_MAGIC_SYSRQ=y 556CONFIG_MAGIC_SYSRQ=y
557CONFIG_WW_MUTEX_SELFTEST=m
558CONFIG_ATOMIC64_SELFTEST=m
550CONFIG_ASYNC_RAID6_TEST=m 559CONFIG_ASYNC_RAID6_TEST=m
551CONFIG_TEST_HEXDUMP=m 560CONFIG_TEST_HEXDUMP=m
552CONFIG_TEST_STRING_HELPERS=m 561CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
577CONFIG_CRYPTO_LRW=m 586CONFIG_CRYPTO_LRW=m
578CONFIG_CRYPTO_PCBC=m 587CONFIG_CRYPTO_PCBC=m
579CONFIG_CRYPTO_KEYWRAP=m 588CONFIG_CRYPTO_KEYWRAP=m
589CONFIG_CRYPTO_CMAC=m
580CONFIG_CRYPTO_XCBC=m 590CONFIG_CRYPTO_XCBC=m
581CONFIG_CRYPTO_VMAC=m 591CONFIG_CRYPTO_VMAC=m
582CONFIG_CRYPTO_MICHAEL_MIC=m 592CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
588CONFIG_CRYPTO_SHA3=m 598CONFIG_CRYPTO_SHA3=m
589CONFIG_CRYPTO_TGR192=m 599CONFIG_CRYPTO_TGR192=m
590CONFIG_CRYPTO_WP512=m 600CONFIG_CRYPTO_WP512=m
601CONFIG_CRYPTO_AES_TI=m
591CONFIG_CRYPTO_ANUBIS=m 602CONFIG_CRYPTO_ANUBIS=m
592CONFIG_CRYPTO_BLOWFISH=m 603CONFIG_CRYPTO_BLOWFISH=m
593CONFIG_CRYPTO_CAMELLIA=m 604CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
612CONFIG_CRYPTO_USER_API_RNG=m 623CONFIG_CRYPTO_USER_API_RNG=m
613CONFIG_CRYPTO_USER_API_AEAD=m 624CONFIG_CRYPTO_USER_API_AEAD=m
614# CONFIG_CRYPTO_HW is not set 625# CONFIG_CRYPTO_HW is not set
626CONFIG_CRC32_SELFTEST=m
615CONFIG_XZ_DEC_TEST=m 627CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0decb2f8..0e414549b235 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
21CONFIG_UNIXWARE_DISKLABEL=y 21CONFIG_UNIXWARE_DISKLABEL=y
22# CONFIG_EFI_PARTITION is not set 22# CONFIG_EFI_PARTITION is not set
23CONFIG_IOSCHED_DEADLINE=m 23CONFIG_IOSCHED_DEADLINE=m
24CONFIG_MQ_IOSCHED_DEADLINE=m
24CONFIG_KEXEC=y 25CONFIG_KEXEC=y
25CONFIG_BOOTINFO_PROC=y 26CONFIG_BOOTINFO_PROC=y
26CONFIG_M68020=y 27CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
67CONFIG_NET_FOU_IP_TUNNELS=y 68CONFIG_NET_FOU_IP_TUNNELS=y
68CONFIG_INET_AH=m 69CONFIG_INET_AH=m
69CONFIG_INET_ESP=m 70CONFIG_INET_ESP=m
71CONFIG_INET_ESP_OFFLOAD=m
70CONFIG_INET_IPCOMP=m 72CONFIG_INET_IPCOMP=m
71CONFIG_INET_XFRM_MODE_TRANSPORT=m 73CONFIG_INET_XFRM_MODE_TRANSPORT=m
72CONFIG_INET_XFRM_MODE_TUNNEL=m 74CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
78CONFIG_IPV6_ROUTER_PREF=y 80CONFIG_IPV6_ROUTER_PREF=y
79CONFIG_INET6_AH=m 81CONFIG_INET6_AH=m
80CONFIG_INET6_ESP=m 82CONFIG_INET6_ESP=m
83CONFIG_INET6_ESP_OFFLOAD=m
81CONFIG_INET6_IPCOMP=m 84CONFIG_INET6_IPCOMP=m
82CONFIG_IPV6_ILA=m 85CONFIG_IPV6_ILA=m
83CONFIG_IPV6_VTI=m 86CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
108CONFIG_NFT_CT=m 111CONFIG_NFT_CT=m
109CONFIG_NFT_SET_RBTREE=m 112CONFIG_NFT_SET_RBTREE=m
110CONFIG_NFT_SET_HASH=m 113CONFIG_NFT_SET_HASH=m
114CONFIG_NFT_SET_BITMAP=m
111CONFIG_NFT_COUNTER=m 115CONFIG_NFT_COUNTER=m
112CONFIG_NFT_LOG=m 116CONFIG_NFT_LOG=m
113CONFIG_NFT_LIMIT=m 117CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
308CONFIG_NET_L3_MASTER_DEV=y 312CONFIG_NET_L3_MASTER_DEV=y
309CONFIG_AF_KCM=m 313CONFIG_AF_KCM=m
310# CONFIG_WIRELESS is not set 314# CONFIG_WIRELESS is not set
315CONFIG_PSAMPLE=m
316CONFIG_NET_IFE=m
311CONFIG_NET_DEVLINK=m 317CONFIG_NET_DEVLINK=m
312# CONFIG_UEVENT_HELPER is not set 318# CONFIG_UEVENT_HELPER is not set
313CONFIG_DEVTMPFS=y 319CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
402CONFIG_MACVLAN=m 408CONFIG_MACVLAN=m
403CONFIG_MACVTAP=m 409CONFIG_MACVTAP=m
404CONFIG_IPVLAN=m 410CONFIG_IPVLAN=m
411CONFIG_IPVTAP=m
405CONFIG_VXLAN=m 412CONFIG_VXLAN=m
406CONFIG_GENEVE=m 413CONFIG_GENEVE=m
407CONFIG_GTP=m 414CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
419CONFIG_MVME147_NET=y 426CONFIG_MVME147_NET=y
420CONFIG_SUN3LANCE=y 427CONFIG_SUN3LANCE=y
421CONFIG_MACMACE=y 428CONFIG_MACMACE=y
429# CONFIG_NET_VENDOR_AQUANTIA is not set
422# CONFIG_NET_VENDOR_ARC is not set 430# CONFIG_NET_VENDOR_ARC is not set
423# CONFIG_NET_CADENCE is not set 431# CONFIG_NET_CADENCE is not set
424# CONFIG_NET_VENDOR_BROADCOM is not set 432# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
444# CONFIG_NET_VENDOR_SOLARFLARE is not set 452# CONFIG_NET_VENDOR_SOLARFLARE is not set
445CONFIG_SMC91X=y 453CONFIG_SMC91X=y
446# CONFIG_NET_VENDOR_STMICRO is not set 454# CONFIG_NET_VENDOR_STMICRO is not set
447# CONFIG_NET_VENDOR_SYNOPSYS is not set
448# CONFIG_NET_VENDOR_VIA is not set 455# CONFIG_NET_VENDOR_VIA is not set
449# CONFIG_NET_VENDOR_WIZNET is not set 456# CONFIG_NET_VENDOR_WIZNET is not set
450CONFIG_PLIP=m 457CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
627CONFIG_DLM=m 634CONFIG_DLM=m
628# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 635# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
629CONFIG_MAGIC_SYSRQ=y 636CONFIG_MAGIC_SYSRQ=y
637CONFIG_WW_MUTEX_SELFTEST=m
638CONFIG_ATOMIC64_SELFTEST=m
630CONFIG_ASYNC_RAID6_TEST=m 639CONFIG_ASYNC_RAID6_TEST=m
631CONFIG_TEST_HEXDUMP=m 640CONFIG_TEST_HEXDUMP=m
632CONFIG_TEST_STRING_HELPERS=m 641CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
657CONFIG_CRYPTO_LRW=m 666CONFIG_CRYPTO_LRW=m
658CONFIG_CRYPTO_PCBC=m 667CONFIG_CRYPTO_PCBC=m
659CONFIG_CRYPTO_KEYWRAP=m 668CONFIG_CRYPTO_KEYWRAP=m
669CONFIG_CRYPTO_CMAC=m
660CONFIG_CRYPTO_XCBC=m 670CONFIG_CRYPTO_XCBC=m
661CONFIG_CRYPTO_VMAC=m 671CONFIG_CRYPTO_VMAC=m
662CONFIG_CRYPTO_MICHAEL_MIC=m 672CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
668CONFIG_CRYPTO_SHA3=m 678CONFIG_CRYPTO_SHA3=m
669CONFIG_CRYPTO_TGR192=m 679CONFIG_CRYPTO_TGR192=m
670CONFIG_CRYPTO_WP512=m 680CONFIG_CRYPTO_WP512=m
681CONFIG_CRYPTO_AES_TI=m
671CONFIG_CRYPTO_ANUBIS=m 682CONFIG_CRYPTO_ANUBIS=m
672CONFIG_CRYPTO_BLOWFISH=m 683CONFIG_CRYPTO_BLOWFISH=m
673CONFIG_CRYPTO_CAMELLIA=m 684CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
692CONFIG_CRYPTO_USER_API_RNG=m 703CONFIG_CRYPTO_USER_API_RNG=m
693CONFIG_CRYPTO_USER_API_AEAD=m 704CONFIG_CRYPTO_USER_API_AEAD=m
694# CONFIG_CRYPTO_HW is not set 705# CONFIG_CRYPTO_HW is not set
706CONFIG_CRC32_SELFTEST=m
695CONFIG_XZ_DEC_TEST=m 707CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd22e96f..b2e687a0ec3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68030=y 31CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y 56CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_INET_AH=m 57CONFIG_INET_AH=m
57CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
59CONFIG_INET_ESP_OFFLOAD=m
58CONFIG_INET_IPCOMP=m 60CONFIG_INET_IPCOMP=m
59CONFIG_INET_XFRM_MODE_TRANSPORT=m 61CONFIG_INET_XFRM_MODE_TRANSPORT=m
60CONFIG_INET_XFRM_MODE_TUNNEL=m 62CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
66CONFIG_IPV6_ROUTER_PREF=y 68CONFIG_IPV6_ROUTER_PREF=y
67CONFIG_INET6_AH=m 69CONFIG_INET6_AH=m
68CONFIG_INET6_ESP=m 70CONFIG_INET6_ESP=m
71CONFIG_INET6_ESP_OFFLOAD=m
69CONFIG_INET6_IPCOMP=m 72CONFIG_INET6_IPCOMP=m
70CONFIG_IPV6_ILA=m 73CONFIG_IPV6_ILA=m
71CONFIG_IPV6_VTI=m 74CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
96CONFIG_NFT_CT=m 99CONFIG_NFT_CT=m
97CONFIG_NFT_SET_RBTREE=m 100CONFIG_NFT_SET_RBTREE=m
98CONFIG_NFT_SET_HASH=m 101CONFIG_NFT_SET_HASH=m
102CONFIG_NFT_SET_BITMAP=m
99CONFIG_NFT_COUNTER=m 103CONFIG_NFT_COUNTER=m
100CONFIG_NFT_LOG=m 104CONFIG_NFT_LOG=m
101CONFIG_NFT_LIMIT=m 105CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
293CONFIG_NET_L3_MASTER_DEV=y 297CONFIG_NET_L3_MASTER_DEV=y
294CONFIG_AF_KCM=m 298CONFIG_AF_KCM=m
295# CONFIG_WIRELESS is not set 299# CONFIG_WIRELESS is not set
300CONFIG_PSAMPLE=m
301CONFIG_NET_IFE=m
296CONFIG_NET_DEVLINK=m 302CONFIG_NET_DEVLINK=m
297# CONFIG_UEVENT_HELPER is not set 303# CONFIG_UEVENT_HELPER is not set
298CONFIG_DEVTMPFS=y 304CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
351CONFIG_MACVLAN=m 357CONFIG_MACVLAN=m
352CONFIG_MACVTAP=m 358CONFIG_MACVTAP=m
353CONFIG_IPVLAN=m 359CONFIG_IPVLAN=m
360CONFIG_IPVTAP=m
354CONFIG_VXLAN=m 361CONFIG_VXLAN=m
355CONFIG_GENEVE=m 362CONFIG_GENEVE=m
356CONFIG_GTP=m 363CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
361# CONFIG_NET_VENDOR_ALACRITECH is not set 368# CONFIG_NET_VENDOR_ALACRITECH is not set
362# CONFIG_NET_VENDOR_AMAZON is not set 369# CONFIG_NET_VENDOR_AMAZON is not set
363CONFIG_MVME147_NET=y 370CONFIG_MVME147_NET=y
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca858f05..cbd8ee24d1bc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68040=y 31CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
352CONFIG_MACVLAN=m 358CONFIG_MACVLAN=m
353CONFIG_MACVTAP=m 359CONFIG_MACVTAP=m
354CONFIG_IPVLAN=m 360CONFIG_IPVLAN=m
361CONFIG_IPVTAP=m
355CONFIG_VXLAN=m 362CONFIG_VXLAN=m
356CONFIG_GENEVE=m 363CONFIG_GENEVE=m
357CONFIG_GTP=m 364CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
361CONFIG_VETH=m 368CONFIG_VETH=m
362# CONFIG_NET_VENDOR_ALACRITECH is not set 369# CONFIG_NET_VENDOR_ALACRITECH is not set
363# CONFIG_NET_VENDOR_AMAZON is not set 370# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3fd5fcd..1e82cc944339 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68040=y 32CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
358CONFIG_MACVLAN=m 364CONFIG_MACVLAN=m
359CONFIG_MACVTAP=m 365CONFIG_MACVTAP=m
360CONFIG_IPVLAN=m 366CONFIG_IPVLAN=m
367CONFIG_IPVTAP=m
361CONFIG_VXLAN=m 368CONFIG_VXLAN=m
362CONFIG_GENEVE=m 369CONFIG_GENEVE=m
363CONFIG_GTP=m 370CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
369# CONFIG_NET_VENDOR_ALACRITECH is not set 376# CONFIG_NET_VENDOR_ALACRITECH is not set
370# CONFIG_NET_VENDOR_AMAZON is not set 377# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AMD is not set 378# CONFIG_NET_VENDOR_AMD is not set
379# CONFIG_NET_VENDOR_AQUANTIA is not set
372# CONFIG_NET_VENDOR_ARC is not set 380# CONFIG_NET_VENDOR_ARC is not set
373# CONFIG_NET_CADENCE is not set 381# CONFIG_NET_CADENCE is not set
374# CONFIG_NET_VENDOR_BROADCOM is not set 382# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
388# CONFIG_NET_VENDOR_SOLARFLARE is not set 396# CONFIG_NET_VENDOR_SOLARFLARE is not set
389# CONFIG_NET_VENDOR_SMSC is not set 397# CONFIG_NET_VENDOR_SMSC is not set
390# CONFIG_NET_VENDOR_STMICRO is not set 398# CONFIG_NET_VENDOR_STMICRO is not set
391# CONFIG_NET_VENDOR_SYNOPSYS is not set
392# CONFIG_NET_VENDOR_VIA is not set 399# CONFIG_NET_VENDOR_VIA is not set
393# CONFIG_NET_VENDOR_WIZNET is not set 400# CONFIG_NET_VENDOR_WIZNET is not set
394CONFIG_PLIP=m 401CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
538CONFIG_DLM=m 545CONFIG_DLM=m
539# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 546# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
540CONFIG_MAGIC_SYSRQ=y 547CONFIG_MAGIC_SYSRQ=y
548CONFIG_WW_MUTEX_SELFTEST=m
549CONFIG_ATOMIC64_SELFTEST=m
541CONFIG_ASYNC_RAID6_TEST=m 550CONFIG_ASYNC_RAID6_TEST=m
542CONFIG_TEST_HEXDUMP=m 551CONFIG_TEST_HEXDUMP=m
543CONFIG_TEST_STRING_HELPERS=m 552CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
568CONFIG_CRYPTO_LRW=m 577CONFIG_CRYPTO_LRW=m
569CONFIG_CRYPTO_PCBC=m 578CONFIG_CRYPTO_PCBC=m
570CONFIG_CRYPTO_KEYWRAP=m 579CONFIG_CRYPTO_KEYWRAP=m
580CONFIG_CRYPTO_CMAC=m
571CONFIG_CRYPTO_XCBC=m 581CONFIG_CRYPTO_XCBC=m
572CONFIG_CRYPTO_VMAC=m 582CONFIG_CRYPTO_VMAC=m
573CONFIG_CRYPTO_MICHAEL_MIC=m 583CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
579CONFIG_CRYPTO_SHA3=m 589CONFIG_CRYPTO_SHA3=m
580CONFIG_CRYPTO_TGR192=m 590CONFIG_CRYPTO_TGR192=m
581CONFIG_CRYPTO_WP512=m 591CONFIG_CRYPTO_WP512=m
592CONFIG_CRYPTO_AES_TI=m
582CONFIG_CRYPTO_ANUBIS=m 593CONFIG_CRYPTO_ANUBIS=m
583CONFIG_CRYPTO_BLOWFISH=m 594CONFIG_CRYPTO_BLOWFISH=m
584CONFIG_CRYPTO_CAMELLIA=m 595CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
603CONFIG_CRYPTO_USER_API_RNG=m 614CONFIG_CRYPTO_USER_API_RNG=m
604CONFIG_CRYPTO_USER_API_AEAD=m 615CONFIG_CRYPTO_USER_API_AEAD=m
605# CONFIG_CRYPTO_HW is not set 616# CONFIG_CRYPTO_HW is not set
617CONFIG_CRC32_SELFTEST=m
606CONFIG_XZ_DEC_TEST=m 618CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a562ad..f9e77f57a972 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_SUN3=y 31CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
53CONFIG_NET_FOU_IP_TUNNELS=y 54CONFIG_NET_FOU_IP_TUNNELS=y
54CONFIG_INET_AH=m 55CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 56CONFIG_INET_ESP=m
57CONFIG_INET_ESP_OFFLOAD=m
56CONFIG_INET_IPCOMP=m 58CONFIG_INET_IPCOMP=m
57CONFIG_INET_XFRM_MODE_TRANSPORT=m 59CONFIG_INET_XFRM_MODE_TRANSPORT=m
58CONFIG_INET_XFRM_MODE_TUNNEL=m 60CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
64CONFIG_IPV6_ROUTER_PREF=y 66CONFIG_IPV6_ROUTER_PREF=y
65CONFIG_INET6_AH=m 67CONFIG_INET6_AH=m
66CONFIG_INET6_ESP=m 68CONFIG_INET6_ESP=m
69CONFIG_INET6_ESP_OFFLOAD=m
67CONFIG_INET6_IPCOMP=m 70CONFIG_INET6_IPCOMP=m
68CONFIG_IPV6_ILA=m 71CONFIG_IPV6_ILA=m
69CONFIG_IPV6_VTI=m 72CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
94CONFIG_NFT_CT=m 97CONFIG_NFT_CT=m
95CONFIG_NFT_SET_RBTREE=m 98CONFIG_NFT_SET_RBTREE=m
96CONFIG_NFT_SET_HASH=m 99CONFIG_NFT_SET_HASH=m
100CONFIG_NFT_SET_BITMAP=m
97CONFIG_NFT_COUNTER=m 101CONFIG_NFT_COUNTER=m
98CONFIG_NFT_LOG=m 102CONFIG_NFT_LOG=m
99CONFIG_NFT_LIMIT=m 103CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
291CONFIG_NET_L3_MASTER_DEV=y 295CONFIG_NET_L3_MASTER_DEV=y
292CONFIG_AF_KCM=m 296CONFIG_AF_KCM=m
293# CONFIG_WIRELESS is not set 297# CONFIG_WIRELESS is not set
298CONFIG_PSAMPLE=m
299CONFIG_NET_IFE=m
294CONFIG_NET_DEVLINK=m 300CONFIG_NET_DEVLINK=m
295# CONFIG_UEVENT_HELPER is not set 301# CONFIG_UEVENT_HELPER is not set
296CONFIG_DEVTMPFS=y 302CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
349CONFIG_MACVLAN=m 355CONFIG_MACVLAN=m
350CONFIG_MACVTAP=m 356CONFIG_MACVTAP=m
351CONFIG_IPVLAN=m 357CONFIG_IPVLAN=m
358CONFIG_IPVTAP=m
352CONFIG_VXLAN=m 359CONFIG_VXLAN=m
353CONFIG_GENEVE=m 360CONFIG_GENEVE=m
354CONFIG_GTP=m 361CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
359# CONFIG_NET_VENDOR_ALACRITECH is not set 366# CONFIG_NET_VENDOR_ALACRITECH is not set
360# CONFIG_NET_VENDOR_AMAZON is not set 367# CONFIG_NET_VENDOR_AMAZON is not set
361CONFIG_SUN3LANCE=y 368CONFIG_SUN3LANCE=y
369# CONFIG_NET_VENDOR_AQUANTIA is not set
362# CONFIG_NET_VENDOR_ARC is not set 370# CONFIG_NET_VENDOR_ARC is not set
363# CONFIG_NET_CADENCE is not set 371# CONFIG_NET_CADENCE is not set
364# CONFIG_NET_VENDOR_EZCHIP is not set 372# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
375# CONFIG_NET_VENDOR_SOLARFLARE is not set 383# CONFIG_NET_VENDOR_SOLARFLARE is not set
376# CONFIG_NET_VENDOR_STMICRO is not set 384# CONFIG_NET_VENDOR_STMICRO is not set
377# CONFIG_NET_VENDOR_SUN is not set 385# CONFIG_NET_VENDOR_SUN is not set
378# CONFIG_NET_VENDOR_SYNOPSYS is not set
379# CONFIG_NET_VENDOR_VIA is not set 386# CONFIG_NET_VENDOR_VIA is not set
380# CONFIG_NET_VENDOR_WIZNET is not set 387# CONFIG_NET_VENDOR_WIZNET is not set
381CONFIG_PPP=m 388CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
517CONFIG_DLM=m 524CONFIG_DLM=m
518# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 525# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
519CONFIG_MAGIC_SYSRQ=y 526CONFIG_MAGIC_SYSRQ=y
527CONFIG_WW_MUTEX_SELFTEST=m
528CONFIG_ATOMIC64_SELFTEST=m
520CONFIG_ASYNC_RAID6_TEST=m 529CONFIG_ASYNC_RAID6_TEST=m
521CONFIG_TEST_HEXDUMP=m 530CONFIG_TEST_HEXDUMP=m
522CONFIG_TEST_STRING_HELPERS=m 531CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
546CONFIG_CRYPTO_LRW=m 555CONFIG_CRYPTO_LRW=m
547CONFIG_CRYPTO_PCBC=m 556CONFIG_CRYPTO_PCBC=m
548CONFIG_CRYPTO_KEYWRAP=m 557CONFIG_CRYPTO_KEYWRAP=m
558CONFIG_CRYPTO_CMAC=m
549CONFIG_CRYPTO_XCBC=m 559CONFIG_CRYPTO_XCBC=m
550CONFIG_CRYPTO_VMAC=m 560CONFIG_CRYPTO_VMAC=m
551CONFIG_CRYPTO_MICHAEL_MIC=m 561CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
557CONFIG_CRYPTO_SHA3=m 567CONFIG_CRYPTO_SHA3=m
558CONFIG_CRYPTO_TGR192=m 568CONFIG_CRYPTO_TGR192=m
559CONFIG_CRYPTO_WP512=m 569CONFIG_CRYPTO_WP512=m
570CONFIG_CRYPTO_AES_TI=m
560CONFIG_CRYPTO_ANUBIS=m 571CONFIG_CRYPTO_ANUBIS=m
561CONFIG_CRYPTO_BLOWFISH=m 572CONFIG_CRYPTO_BLOWFISH=m
562CONFIG_CRYPTO_CAMELLIA=m 573CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
581CONFIG_CRYPTO_USER_API_RNG=m 592CONFIG_CRYPTO_USER_API_RNG=m
582CONFIG_CRYPTO_USER_API_AEAD=m 593CONFIG_CRYPTO_USER_API_AEAD=m
583# CONFIG_CRYPTO_HW is not set 594# CONFIG_CRYPTO_HW is not set
595CONFIG_CRC32_SELFTEST=m
584CONFIG_XZ_DEC_TEST=m 596CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b61365f769..3c394fcfb368 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_SUN3X=y 31CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
53CONFIG_NET_FOU_IP_TUNNELS=y 54CONFIG_NET_FOU_IP_TUNNELS=y
54CONFIG_INET_AH=m 55CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 56CONFIG_INET_ESP=m
57CONFIG_INET_ESP_OFFLOAD=m
56CONFIG_INET_IPCOMP=m 58CONFIG_INET_IPCOMP=m
57CONFIG_INET_XFRM_MODE_TRANSPORT=m 59CONFIG_INET_XFRM_MODE_TRANSPORT=m
58CONFIG_INET_XFRM_MODE_TUNNEL=m 60CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
64CONFIG_IPV6_ROUTER_PREF=y 66CONFIG_IPV6_ROUTER_PREF=y
65CONFIG_INET6_AH=m 67CONFIG_INET6_AH=m
66CONFIG_INET6_ESP=m 68CONFIG_INET6_ESP=m
69CONFIG_INET6_ESP_OFFLOAD=m
67CONFIG_INET6_IPCOMP=m 70CONFIG_INET6_IPCOMP=m
68CONFIG_IPV6_ILA=m 71CONFIG_IPV6_ILA=m
69CONFIG_IPV6_VTI=m 72CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
94CONFIG_NFT_CT=m 97CONFIG_NFT_CT=m
95CONFIG_NFT_SET_RBTREE=m 98CONFIG_NFT_SET_RBTREE=m
96CONFIG_NFT_SET_HASH=m 99CONFIG_NFT_SET_HASH=m
100CONFIG_NFT_SET_BITMAP=m
97CONFIG_NFT_COUNTER=m 101CONFIG_NFT_COUNTER=m
98CONFIG_NFT_LOG=m 102CONFIG_NFT_LOG=m
99CONFIG_NFT_LIMIT=m 103CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
291CONFIG_NET_L3_MASTER_DEV=y 295CONFIG_NET_L3_MASTER_DEV=y
292CONFIG_AF_KCM=m 296CONFIG_AF_KCM=m
293# CONFIG_WIRELESS is not set 297# CONFIG_WIRELESS is not set
298CONFIG_PSAMPLE=m
299CONFIG_NET_IFE=m
294CONFIG_NET_DEVLINK=m 300CONFIG_NET_DEVLINK=m
295# CONFIG_UEVENT_HELPER is not set 301# CONFIG_UEVENT_HELPER is not set
296CONFIG_DEVTMPFS=y 302CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
349CONFIG_MACVLAN=m 355CONFIG_MACVLAN=m
350CONFIG_MACVTAP=m 356CONFIG_MACVTAP=m
351CONFIG_IPVLAN=m 357CONFIG_IPVLAN=m
358CONFIG_IPVTAP=m
352CONFIG_VXLAN=m 359CONFIG_VXLAN=m
353CONFIG_GENEVE=m 360CONFIG_GENEVE=m
354CONFIG_GTP=m 361CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
359# CONFIG_NET_VENDOR_ALACRITECH is not set 366# CONFIG_NET_VENDOR_ALACRITECH is not set
360# CONFIG_NET_VENDOR_AMAZON is not set 367# CONFIG_NET_VENDOR_AMAZON is not set
361CONFIG_SUN3LANCE=y 368CONFIG_SUN3LANCE=y
369# CONFIG_NET_VENDOR_AQUANTIA is not set
362# CONFIG_NET_VENDOR_ARC is not set 370# CONFIG_NET_VENDOR_ARC is not set
363# CONFIG_NET_CADENCE is not set 371# CONFIG_NET_CADENCE is not set
364# CONFIG_NET_VENDOR_BROADCOM is not set 372# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
375# CONFIG_NET_VENDOR_SEEQ is not set 383# CONFIG_NET_VENDOR_SEEQ is not set
376# CONFIG_NET_VENDOR_SOLARFLARE is not set 384# CONFIG_NET_VENDOR_SOLARFLARE is not set
377# CONFIG_NET_VENDOR_STMICRO is not set 385# CONFIG_NET_VENDOR_STMICRO is not set
378# CONFIG_NET_VENDOR_SYNOPSYS is not set
379# CONFIG_NET_VENDOR_VIA is not set 386# CONFIG_NET_VENDOR_VIA is not set
380# CONFIG_NET_VENDOR_WIZNET is not set 387# CONFIG_NET_VENDOR_WIZNET is not set
381CONFIG_PPP=m 388CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
517CONFIG_DLM=m 524CONFIG_DLM=m
518# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 525# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
519CONFIG_MAGIC_SYSRQ=y 526CONFIG_MAGIC_SYSRQ=y
527CONFIG_WW_MUTEX_SELFTEST=m
528CONFIG_ATOMIC64_SELFTEST=m
520CONFIG_ASYNC_RAID6_TEST=m 529CONFIG_ASYNC_RAID6_TEST=m
521CONFIG_TEST_HEXDUMP=m 530CONFIG_TEST_HEXDUMP=m
522CONFIG_TEST_STRING_HELPERS=m 531CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
547CONFIG_CRYPTO_LRW=m 556CONFIG_CRYPTO_LRW=m
548CONFIG_CRYPTO_PCBC=m 557CONFIG_CRYPTO_PCBC=m
549CONFIG_CRYPTO_KEYWRAP=m 558CONFIG_CRYPTO_KEYWRAP=m
559CONFIG_CRYPTO_CMAC=m
550CONFIG_CRYPTO_XCBC=m 560CONFIG_CRYPTO_XCBC=m
551CONFIG_CRYPTO_VMAC=m 561CONFIG_CRYPTO_VMAC=m
552CONFIG_CRYPTO_MICHAEL_MIC=m 562CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
558CONFIG_CRYPTO_SHA3=m 568CONFIG_CRYPTO_SHA3=m
559CONFIG_CRYPTO_TGR192=m 569CONFIG_CRYPTO_TGR192=m
560CONFIG_CRYPTO_WP512=m 570CONFIG_CRYPTO_WP512=m
571CONFIG_CRYPTO_AES_TI=m
561CONFIG_CRYPTO_ANUBIS=m 572CONFIG_CRYPTO_ANUBIS=m
562CONFIG_CRYPTO_BLOWFISH=m 573CONFIG_CRYPTO_BLOWFISH=m
563CONFIG_CRYPTO_CAMELLIA=m 574CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
582CONFIG_CRYPTO_USER_API_RNG=m 593CONFIG_CRYPTO_USER_API_RNG=m
583CONFIG_CRYPTO_USER_API_AEAD=m 594CONFIG_CRYPTO_USER_API_AEAD=m
584# CONFIG_CRYPTO_HW is not set 595# CONFIG_CRYPTO_HW is not set
596CONFIG_CRC32_SELFTEST=m
585CONFIG_XZ_DEC_TEST=m 597CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
148#define __change_bit(nr, vaddr) change_bit(nr, vaddr) 148#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
149 149
150 150
151static inline int test_bit(int nr, const unsigned long *vaddr) 151static inline int test_bit(int nr, const volatile unsigned long *vaddr)
152{ 152{
153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; 153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
154} 154}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 379 7#define NR_syscalls 380
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
384#define __NR_copy_file_range 376 384#define __NR_copy_file_range 376
385#define __NR_preadv2 377 385#define __NR_preadv2 377
386#define __NR_pwritev2 378 386#define __NR_pwritev2 378
387#define __NR_statx 379
387 388
388#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 389#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9ced24..8c9fcfafe0dd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
399 .long sys_copy_file_range 399 .long sys_copy_file_range
400 .long sys_preadv2 400 .long sys_preadv2
401 .long sys_pwritev2 401 .long sys_pwritev2
402 .long sys_statx
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
197 197
198#define strlen_user(str) strnlen_user(str, 32767) 198#define strlen_user(str) strnlen_user(str, 32767)
199 199
200extern unsigned long __must_check __copy_user_zeroing(void *to, 200extern unsigned long raw_copy_from_user(void *to, const void __user *from,
201 const void __user *from, 201 unsigned long n);
202 unsigned long n);
203 202
204static inline unsigned long 203static inline unsigned long
205copy_from_user(void *to, const void __user *from, unsigned long n) 204copy_from_user(void *to, const void __user *from, unsigned long n)
206{ 205{
206 unsigned long res = n;
207 if (likely(access_ok(VERIFY_READ, from, n))) 207 if (likely(access_ok(VERIFY_READ, from, n)))
208 return __copy_user_zeroing(to, from, n); 208 res = raw_copy_from_user(to, from, n);
209 memset(to, 0, n); 209 if (unlikely(res))
210 return n; 210 memset(to + (n - res), 0, res);
211 return res;
211} 212}
212 213
213#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) 214#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
214#define __copy_from_user_inatomic __copy_from_user 215#define __copy_from_user_inatomic __copy_from_user
215 216
216extern unsigned long __must_check __copy_user(void __user *to, 217extern unsigned long __must_check __copy_user(void __user *to,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 5fd16ee5280c..e615603a4b0a 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -26,6 +26,16 @@
26 * user_regset definitions. 26 * user_regset definitions.
27 */ 27 */
28 28
29static unsigned long user_txstatus(const struct pt_regs *regs)
30{
31 unsigned long data = (unsigned long)regs->ctx.Flags;
32
33 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
34 data |= USER_GP_REGS_STATUS_CATCH_BIT;
35
36 return data;
37}
38
29int metag_gp_regs_copyout(const struct pt_regs *regs, 39int metag_gp_regs_copyout(const struct pt_regs *regs,
30 unsigned int pos, unsigned int count, 40 unsigned int pos, unsigned int count,
31 void *kbuf, void __user *ubuf) 41 void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
64 if (ret) 74 if (ret)
65 goto out; 75 goto out;
66 /* TXSTATUS */ 76 /* TXSTATUS */
67 data = (unsigned long)regs->ctx.Flags; 77 data = user_txstatus(regs);
68 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
69 data |= USER_GP_REGS_STATUS_CATCH_BIT;
70 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 78 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
71 &data, 4*25, 4*26); 79 &data, 4*25, 4*26);
72 if (ret) 80 if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
121 if (ret) 129 if (ret)
122 goto out; 130 goto out;
123 /* TXSTATUS */ 131 /* TXSTATUS */
132 data = user_txstatus(regs);
124 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 133 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
125 &data, 4*25, 4*26); 134 &data, 4*25, 4*26);
126 if (ret) 135 if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
246 unsigned long long *ptr; 255 unsigned long long *ptr;
247 int ret, i; 256 int ret, i;
248 257
258 if (count < 4*13)
259 return -EINVAL;
249 /* Read the entire pipeline before making any changes */ 260 /* Read the entire pipeline before making any changes */
250 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 261 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
251 &rp, 0, 4*13); 262 &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
305 const void *kbuf, const void __user *ubuf) 316 const void *kbuf, const void __user *ubuf)
306{ 317{
307 int ret; 318 int ret;
308 void __user *tls; 319 void __user *tls = target->thread.tls_ptr;
309 320
310 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 321 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
311 if (ret) 322 if (ret)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
29 COPY \ 29 COPY \
30 "1:\n" \ 30 "1:\n" \
31 " .section .fixup,\"ax\"\n" \ 31 " .section .fixup,\"ax\"\n" \
32 " MOV D1Ar1,#0\n" \
33 FIXUP \ 32 FIXUP \
34 " MOVT D1Ar1,#HI(1b)\n" \ 33 " MOVT D1Ar1,#HI(1b)\n" \
35 " JUMP D1Ar1,#LO(1b)\n" \ 34 " JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
260 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 259 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
261 "22:\n" \ 260 "22:\n" \
262 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 261 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
263 "SUB %3, %3, #32\n" \
264 "23:\n" \ 262 "23:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 263 "SUB %3, %3, #32\n" \
266 "24:\n" \ 264 "24:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
266 "25:\n" \
267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
268 "26:\n" \
268 "SUB %3, %3, #32\n" \ 269 "SUB %3, %3, #32\n" \
269 "DCACHE [%1+#-64], D0Ar6\n" \ 270 "DCACHE [%1+#-64], D0Ar6\n" \
270 "BR $Lloop"id"\n" \ 271 "BR $Lloop"id"\n" \
271 \ 272 \
272 "MOV RAPF, %1\n" \ 273 "MOV RAPF, %1\n" \
273 "25:\n" \ 274 "27:\n" \
274 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 275 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
275 "26:\n" \ 276 "28:\n" \
276 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 277 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
278 "29:\n" \
277 "SUB %3, %3, #32\n" \ 279 "SUB %3, %3, #32\n" \
278 "27:\n" \ 280 "30:\n" \
279 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 281 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
280 "28:\n" \ 282 "31:\n" \
281 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 283 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
284 "32:\n" \
282 "SUB %0, %0, #8\n" \ 285 "SUB %0, %0, #8\n" \
283 "29:\n" \ 286 "33:\n" \
284 "SETL [%0++], D0.7, D1.7\n" \ 287 "SETL [%0++], D0.7, D1.7\n" \
285 "SUB %3, %3, #32\n" \ 288 "SUB %3, %3, #32\n" \
286 "1:" \ 289 "1:" \
@@ -312,11 +315,15 @@
312 " .long 26b,3b\n" \ 315 " .long 26b,3b\n" \
313 " .long 27b,3b\n" \ 316 " .long 27b,3b\n" \
314 " .long 28b,3b\n" \ 317 " .long 28b,3b\n" \
315 " .long 29b,4b\n" \ 318 " .long 29b,3b\n" \
319 " .long 30b,3b\n" \
320 " .long 31b,3b\n" \
321 " .long 32b,3b\n" \
322 " .long 33b,4b\n" \
316 " .previous\n" \ 323 " .previous\n" \
317 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 324 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
318 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 325 : "0" (to), "1" (from), "2" (ret), "3" (n) \
319 : "D1Ar1", "D0Ar2", "memory") 326 : "D1Ar1", "D0Ar2", "cc", "memory")
320 327
321/* rewind 'to' and 'from' pointers when a fault occurs 328/* rewind 'to' and 'from' pointers when a fault occurs
322 * 329 *
@@ -342,7 +349,7 @@
342#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ 349#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
343 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 350 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
344 "LSR D0Ar2, D0Ar2, #8\n" \ 351 "LSR D0Ar2, D0Ar2, #8\n" \
345 "AND D0Ar2, D0Ar2, #0x7\n" \ 352 "ANDS D0Ar2, D0Ar2, #0x7\n" \
346 "ADDZ D0Ar2, D0Ar2, #4\n" \ 353 "ADDZ D0Ar2, D0Ar2, #4\n" \
347 "SUB D0Ar2, D0Ar2, #1\n" \ 354 "SUB D0Ar2, D0Ar2, #1\n" \
348 "MOV D1Ar1, #4\n" \ 355 "MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
403 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
404 "22:\n" \ 411 "22:\n" \
405 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 412 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
406 "SUB %3, %3, #16\n" \
407 "23:\n" \ 413 "23:\n" \
408 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
409 "24:\n" \
410 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
411 "SUB %3, %3, #16\n" \ 414 "SUB %3, %3, #16\n" \
412 "25:\n" \ 415 "24:\n" \
413 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 416 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
414 "26:\n" \ 417 "25:\n" \
415 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 418 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
419 "26:\n" \
416 "SUB %3, %3, #16\n" \ 420 "SUB %3, %3, #16\n" \
417 "27:\n" \ 421 "27:\n" \
418 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 422 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
419 "28:\n" \ 423 "28:\n" \
420 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 424 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
425 "29:\n" \
426 "SUB %3, %3, #16\n" \
427 "30:\n" \
428 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
429 "31:\n" \
430 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
431 "32:\n" \
421 "SUB %3, %3, #16\n" \ 432 "SUB %3, %3, #16\n" \
422 "DCACHE [%1+#-64], D0Ar6\n" \ 433 "DCACHE [%1+#-64], D0Ar6\n" \
423 "BR $Lloop"id"\n" \ 434 "BR $Lloop"id"\n" \
424 \ 435 \
425 "MOV RAPF, %1\n" \ 436 "MOV RAPF, %1\n" \
426 "29:\n" \ 437 "33:\n" \
427 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 438 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
428 "30:\n" \ 439 "34:\n" \
429 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 440 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
441 "35:\n" \
430 "SUB %3, %3, #16\n" \ 442 "SUB %3, %3, #16\n" \
431 "31:\n" \ 443 "36:\n" \
432 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 444 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
433 "32:\n" \ 445 "37:\n" \
434 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 446 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
447 "38:\n" \
435 "SUB %3, %3, #16\n" \ 448 "SUB %3, %3, #16\n" \
436 "33:\n" \ 449 "39:\n" \
437 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 450 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
438 "34:\n" \ 451 "40:\n" \
439 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 452 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
453 "41:\n" \
440 "SUB %3, %3, #16\n" \ 454 "SUB %3, %3, #16\n" \
441 "35:\n" \ 455 "42:\n" \
442 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 456 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
443 "36:\n" \ 457 "43:\n" \
444 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 458 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
459 "44:\n" \
445 "SUB %0, %0, #4\n" \ 460 "SUB %0, %0, #4\n" \
446 "37:\n" \ 461 "45:\n" \
447 "SETD [%0++], D0.7\n" \ 462 "SETD [%0++], D0.7\n" \
448 "SUB %3, %3, #16\n" \ 463 "SUB %3, %3, #16\n" \
449 "1:" \ 464 "1:" \
@@ -483,11 +498,19 @@
483 " .long 34b,3b\n" \ 498 " .long 34b,3b\n" \
484 " .long 35b,3b\n" \ 499 " .long 35b,3b\n" \
485 " .long 36b,3b\n" \ 500 " .long 36b,3b\n" \
486 " .long 37b,4b\n" \ 501 " .long 37b,3b\n" \
502 " .long 38b,3b\n" \
503 " .long 39b,3b\n" \
504 " .long 40b,3b\n" \
505 " .long 41b,3b\n" \
506 " .long 42b,3b\n" \
507 " .long 43b,3b\n" \
508 " .long 44b,3b\n" \
509 " .long 45b,4b\n" \
487 " .previous\n" \ 510 " .previous\n" \
488 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 511 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
489 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 512 : "0" (to), "1" (from), "2" (ret), "3" (n) \
490 : "D1Ar1", "D0Ar2", "memory") 513 : "D1Ar1", "D0Ar2", "cc", "memory")
491 514
492/* rewind 'to' and 'from' pointers when a fault occurs 515/* rewind 'to' and 'from' pointers when a fault occurs
493 * 516 *
@@ -513,7 +536,7 @@
513#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ 536#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
514 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 537 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
515 "LSR D0Ar2, D0Ar2, #8\n" \ 538 "LSR D0Ar2, D0Ar2, #8\n" \
516 "AND D0Ar2, D0Ar2, #0x7\n" \ 539 "ANDS D0Ar2, D0Ar2, #0x7\n" \
517 "ADDZ D0Ar2, D0Ar2, #4\n" \ 540 "ADDZ D0Ar2, D0Ar2, #4\n" \
518 "SUB D0Ar2, D0Ar2, #1\n" \ 541 "SUB D0Ar2, D0Ar2, #1\n" \
519 "MOV D1Ar1, #4\n" \ 542 "MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
538 if ((unsigned long) src & 1) { 561 if ((unsigned long) src & 1) {
539 __asm_copy_to_user_1(dst, src, retn); 562 __asm_copy_to_user_1(dst, src, retn);
540 n--; 563 n--;
564 if (retn)
565 return retn + n;
541 } 566 }
542 if ((unsigned long) dst & 1) { 567 if ((unsigned long) dst & 1) {
543 /* Worst case - byte copy */ 568 /* Worst case - byte copy */
544 while (n > 0) { 569 while (n > 0) {
545 __asm_copy_to_user_1(dst, src, retn); 570 __asm_copy_to_user_1(dst, src, retn);
546 n--; 571 n--;
572 if (retn)
573 return retn + n;
547 } 574 }
548 } 575 }
549 if (((unsigned long) src & 2) && n >= 2) { 576 if (((unsigned long) src & 2) && n >= 2) {
550 __asm_copy_to_user_2(dst, src, retn); 577 __asm_copy_to_user_2(dst, src, retn);
551 n -= 2; 578 n -= 2;
579 if (retn)
580 return retn + n;
552 } 581 }
553 if ((unsigned long) dst & 2) { 582 if ((unsigned long) dst & 2) {
554 /* Second worst case - word copy */ 583 /* Second worst case - word copy */
555 while (n >= 2) { 584 while (n >= 2) {
556 __asm_copy_to_user_2(dst, src, retn); 585 __asm_copy_to_user_2(dst, src, retn);
557 n -= 2; 586 n -= 2;
587 if (retn)
588 return retn + n;
558 } 589 }
559 } 590 }
560 591
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
569 while (n >= 8) { 600 while (n >= 8) {
570 __asm_copy_to_user_8x64(dst, src, retn); 601 __asm_copy_to_user_8x64(dst, src, retn);
571 n -= 8; 602 n -= 8;
603 if (retn)
604 return retn + n;
572 } 605 }
573 } 606 }
574 if (n >= RAPF_MIN_BUF_SIZE) { 607 if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
581 while (n >= 8) { 614 while (n >= 8) {
582 __asm_copy_to_user_8x64(dst, src, retn); 615 __asm_copy_to_user_8x64(dst, src, retn);
583 n -= 8; 616 n -= 8;
617 if (retn)
618 return retn + n;
584 } 619 }
585 } 620 }
586#endif 621#endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
588 while (n >= 16) { 623 while (n >= 16) {
589 __asm_copy_to_user_16(dst, src, retn); 624 __asm_copy_to_user_16(dst, src, retn);
590 n -= 16; 625 n -= 16;
626 if (retn)
627 return retn + n;
591 } 628 }
592 629
593 while (n >= 4) { 630 while (n >= 4) {
594 __asm_copy_to_user_4(dst, src, retn); 631 __asm_copy_to_user_4(dst, src, retn);
595 n -= 4; 632 n -= 4;
633 if (retn)
634 return retn + n;
596 } 635 }
597 636
598 switch (n) { 637 switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
609 break; 648 break;
610 } 649 }
611 650
651 /*
652 * If we get here, retn correctly reflects the number of failing
653 * bytes.
654 */
612 return retn; 655 return retn;
613} 656}
614EXPORT_SYMBOL(__copy_user); 657EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
617 __asm_copy_user_cont(to, from, ret, \ 660 __asm_copy_user_cont(to, from, ret, \
618 " GETB D1Ar1,[%1++]\n" \ 661 " GETB D1Ar1,[%1++]\n" \
619 "2: SETB [%0++],D1Ar1\n", \ 662 "2: SETB [%0++],D1Ar1\n", \
620 "3: ADD %2,%2,#1\n" \ 663 "3: ADD %2,%2,#1\n", \
621 " SETB [%0++],D1Ar1\n", \
622 " .long 2b,3b\n") 664 " .long 2b,3b\n")
623 665
624#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 666#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
625 __asm_copy_user_cont(to, from, ret, \ 667 __asm_copy_user_cont(to, from, ret, \
626 " GETW D1Ar1,[%1++]\n" \ 668 " GETW D1Ar1,[%1++]\n" \
627 "2: SETW [%0++],D1Ar1\n" COPY, \ 669 "2: SETW [%0++],D1Ar1\n" COPY, \
628 "3: ADD %2,%2,#2\n" \ 670 "3: ADD %2,%2,#2\n" FIXUP, \
629 " SETW [%0++],D1Ar1\n" FIXUP, \
630 " .long 2b,3b\n" TENTRY) 671 " .long 2b,3b\n" TENTRY)
631 672
632#define __asm_copy_from_user_2(to, from, ret) \ 673#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
636 __asm_copy_from_user_2x_cont(to, from, ret, \ 677 __asm_copy_from_user_2x_cont(to, from, ret, \
637 " GETB D1Ar1,[%1++]\n" \ 678 " GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \ 679 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \ 680 "5: ADD %2,%2,#1\n", \
640 " SETB [%0++],D1Ar1\n", \
641 " .long 4b,5b\n") 681 " .long 4b,5b\n")
642 682
643#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 683#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \ 684 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \ 685 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \ 686 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \ 687 "3: ADD %2,%2,#4\n" FIXUP, \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY) 688 " .long 2b,3b\n" TENTRY)
650 689
651#define __asm_copy_from_user_4(to, from, ret) \ 690#define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") 691 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
653 692
654#define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
660 " .long 4b,5b\n")
661
662#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
669
670#define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
672
673#define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
679 " .long 6b,7b\n")
680
681#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
688
689#define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
691
692#define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
698 " .long 6b,7b\n")
699
700#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
707
708#define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
710
711#define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
717 " .long 8b,9b\n")
718
719#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
726
727#define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
729
730#define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
736 " .long 8b,9b\n")
737
738#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
745
746#define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
748
749#define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
755 " .long 10b,11b\n")
756
757#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
764
765#define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
767
768#define __asm_copy_from_user_8x64(to, from, ret) \ 693#define __asm_copy_from_user_8x64(to, from, ret) \
769 asm volatile ( \ 694 asm volatile ( \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \ 695 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \ 696 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
772 "1:\n" \ 697 "1:\n" \
773 " .section .fixup,\"ax\"\n" \ 698 " .section .fixup,\"ax\"\n" \
774 " MOV D1Ar1,#0\n" \
775 " MOV D0Ar2,#0\n" \
776 "3: ADD %2,%2,#8\n" \ 699 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \ 700 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \ 701 " JUMP D0Ar2,#LO(1b)\n" \
780 " .previous\n" \ 702 " .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
789 * 711 *
790 * Rationale: 712 * Rationale:
791 * A fault occurs while reading from user buffer, which is the 713 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only 714 * source.
793 * need to rewind by 8 bytes.
794 * Since we don't write to kernel buffer until we read first, 715 * Since we don't write to kernel buffer until we read first,
795 * the kernel buffer is at the right state and needn't be 716 * the kernel buffer is at the right state and needn't be
796 * corrected. 717 * corrected, but the source must be rewound to the beginning of
718 * the block, which is LSM_STEP*8 bytes.
719 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
720 * and stored in D0Ar2
721 *
722 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
723 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
724 * a fault happens at the 4th write, LSM_STEP will be 0
725 * instead of 4. The code copes with that.
797 */ 726 */
798#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ 727#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 728 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
800 "SUB %1, %1, #8\n") 729 "LSR D0Ar2, D0Ar2, #5\n" \
730 "ANDS D0Ar2, D0Ar2, #0x38\n" \
731 "ADDZ D0Ar2, D0Ar2, #32\n" \
732 "SUB %1, %1, D0Ar2\n")
801 733
802/* rewind 'from' pointer when a fault occurs 734/* rewind 'from' pointer when a fault occurs
803 * 735 *
804 * Rationale: 736 * Rationale:
805 * A fault occurs while reading from user buffer, which is the 737 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only 738 * source.
807 * need to rewind by 4 bytes.
808 * Since we don't write to kernel buffer until we read first, 739 * Since we don't write to kernel buffer until we read first,
809 * the kernel buffer is at the right state and needn't be 740 * the kernel buffer is at the right state and needn't be
810 * corrected. 741 * corrected, but the source must be rewound to the beginning of
742 * the block, which is LSM_STEP*4 bytes.
743 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
744 * and stored in D0Ar2
745 *
746 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
747 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
748 * a fault happens at the 4th write, LSM_STEP will be 0
749 * instead of 4. The code copes with that.
811 */ 750 */
812#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ 751#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 752 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
814 "SUB %1, %1, #4\n") 753 "LSR D0Ar2, D0Ar2, #6\n" \
754 "ANDS D0Ar2, D0Ar2, #0x1c\n" \
755 "ADDZ D0Ar2, D0Ar2, #16\n" \
756 "SUB %1, %1, D0Ar2\n")
815 757
816 758
817/* Copy from user to kernel, zeroing the bytes that were inaccessible in 759/*
818 userland. The return-value is the number of bytes that were 760 * Copy from user to kernel. The return-value is the number of bytes that were
819 inaccessible. */ 761 * inaccessible.
820unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 762 */
821 unsigned long n) 763unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
764 unsigned long n)
822{ 765{
823 register char *dst asm ("A0.2") = pdst; 766 register char *dst asm ("A0.2") = pdst;
824 register const char __user *src asm ("A1.2") = psrc; 767 register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
830 if ((unsigned long) src & 1) { 773 if ((unsigned long) src & 1) {
831 __asm_copy_from_user_1(dst, src, retn); 774 __asm_copy_from_user_1(dst, src, retn);
832 n--; 775 n--;
776 if (retn)
777 return retn + n;
833 } 778 }
834 if ((unsigned long) dst & 1) { 779 if ((unsigned long) dst & 1) {
835 /* Worst case - byte copy */ 780 /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
837 __asm_copy_from_user_1(dst, src, retn); 782 __asm_copy_from_user_1(dst, src, retn);
838 n--; 783 n--;
839 if (retn) 784 if (retn)
840 goto copy_exception_bytes; 785 return retn + n;
841 } 786 }
842 } 787 }
843 if (((unsigned long) src & 2) && n >= 2) { 788 if (((unsigned long) src & 2) && n >= 2) {
844 __asm_copy_from_user_2(dst, src, retn); 789 __asm_copy_from_user_2(dst, src, retn);
845 n -= 2; 790 n -= 2;
791 if (retn)
792 return retn + n;
846 } 793 }
847 if ((unsigned long) dst & 2) { 794 if ((unsigned long) dst & 2) {
848 /* Second worst case - word copy */ 795 /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
850 __asm_copy_from_user_2(dst, src, retn); 797 __asm_copy_from_user_2(dst, src, retn);
851 n -= 2; 798 n -= 2;
852 if (retn) 799 if (retn)
853 goto copy_exception_bytes; 800 return retn + n;
854 } 801 }
855 } 802 }
856 803
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
860 if (retn != 0)
861 goto copy_exception_bytes;
862
863#ifdef USE_RAPF 804#ifdef USE_RAPF
864 /* 64 bit copy loop */ 805 /* 64 bit copy loop */
865 if (!(((unsigned long) src | (unsigned long) dst) & 7)) { 806 if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
872 __asm_copy_from_user_8x64(dst, src, retn); 813 __asm_copy_from_user_8x64(dst, src, retn);
873 n -= 8; 814 n -= 8;
874 if (retn) 815 if (retn)
875 goto copy_exception_bytes; 816 return retn + n;
876 } 817 }
877 } 818 }
878 819
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
888 __asm_copy_from_user_8x64(dst, src, retn); 829 __asm_copy_from_user_8x64(dst, src, retn);
889 n -= 8; 830 n -= 8;
890 if (retn) 831 if (retn)
891 goto copy_exception_bytes; 832 return retn + n;
892 } 833 }
893 } 834 }
894#endif 835#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
898 n -= 4; 839 n -= 4;
899 840
900 if (retn) 841 if (retn)
901 goto copy_exception_bytes; 842 return retn + n;
902 } 843 }
903 844
904 /* If we get here, there were no memory read faults. */ 845 /* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
924 /* If we get here, retn correctly reflects the number of failing 865 /* If we get here, retn correctly reflects the number of failing
925 bytes. */ 866 bytes. */
926 return retn; 867 return retn;
927
928 copy_exception_bytes:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
933 {
934 char *endp;
935 for (endp = dst + n; dst < endp; dst++)
936 *dst = 0;
937 }
938
939 return retn + n;
940} 868}
941EXPORT_SYMBOL(__copy_user_zeroing); 869EXPORT_SYMBOL(raw_copy_from_user);
942 870
943#define __asm_clear_8x64(to, ret) \ 871#define __asm_clear_8x64(to, ret) \
944 asm volatile ( \ 872 asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..e0bb576410bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
1531 select CPU_SUPPORTS_HIGHMEM 1531 select CPU_SUPPORTS_HIGHMEM
1532 select CPU_SUPPORTS_MSA 1532 select CPU_SUPPORTS_MSA
1533 select GENERIC_CSUM 1533 select GENERIC_CSUM
1534 select MIPS_O32_FP64_SUPPORT if MIPS32_O32 1534 select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
1535 select HAVE_KVM 1535 select HAVE_KVM
1536 help 1536 help
1537 Choose this option to build a kernel for release 6 or later of the 1537 Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f94455f964ec..a2813fe381cf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -21,6 +21,7 @@
21#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
22#include <asm/fpu_emulator.h> 22#include <asm/fpu_emulator.h>
23#include <asm/hazards.h> 23#include <asm/hazards.h>
24#include <asm/ptrace.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/current.h> 26#include <asm/current.h>
26#include <asm/msa.h> 27#include <asm/msa.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e201d1..ddd1c918103b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
18#include <irq.h> 18#include <irq.h>
19 19
20#define IRQ_STACK_SIZE THREAD_SIZE 20#define IRQ_STACK_SIZE THREAD_SIZE
21#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
21 22
22extern void *irq_stack[NR_CPUS]; 23extern void *irq_stack[NR_CPUS];
23 24
25/*
26 * The highest address on the IRQ stack contains a dummy frame put down in
27 * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
28 *
29 * top ------------
30 * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
31 * ------------
32 * | | <- First frame of IRQ context
33 * ------------
34 *
35 * task sp holds a copy of the task stack pointer where the struct pt_regs
36 * from exception entry can be found.
37 */
38
24static inline bool on_irq_stack(int cpu, unsigned long sp) 39static inline bool on_irq_stack(int cpu, unsigned long sp)
25{ 40{
26 unsigned long low = (unsigned long)irq_stack[cpu]; 41 unsigned long low = (unsigned long)irq_stack[cpu];
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe51514..a8df44d60607 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
127 " andi %[ticket], %[ticket], 0xffff \n" 127 " andi %[ticket], %[ticket], 0xffff \n"
128 " bne %[ticket], %[my_ticket], 4f \n" 128 " bne %[ticket], %[my_ticket], 4f \n"
129 " subu %[ticket], %[my_ticket], %[ticket] \n" 129 " subu %[ticket], %[my_ticket], %[ticket] \n"
130 "2: \n" 130 "2: .insn \n"
131 " .subsection 2 \n" 131 " .subsection 2 \n"
132 "4: andi %[ticket], %[ticket], 0xffff \n" 132 "4: andi %[ticket], %[ticket], 0xffff \n"
133 " sll %[ticket], 5 \n" 133 " sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
202 " sc %[ticket], %[ticket_ptr] \n" 202 " sc %[ticket], %[ticket_ptr] \n"
203 " beqz %[ticket], 1b \n" 203 " beqz %[ticket], 1b \n"
204 " li %[ticket], 1 \n" 204 " li %[ticket], 1 \n"
205 "2: \n" 205 "2: .insn \n"
206 " .subsection 2 \n" 206 " .subsection 2 \n"
207 "3: b 2b \n" 207 "3: b 2b \n"
208 " li %[ticket], 0 \n" 208 " li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
382 " .set reorder \n" 382 " .set reorder \n"
383 __WEAK_LLSC_MB 383 __WEAK_LLSC_MB
384 " li %2, 1 \n" 384 " li %2, 1 \n"
385 "2: \n" 385 "2: .insn \n"
386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
387 : GCC_OFF_SMALL_ASM() (rw->lock) 387 : GCC_OFF_SMALL_ASM() (rw->lock)
388 : "memory"); 388 : "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
422 " lui %1, 0x8000 \n" 422 " lui %1, 0x8000 \n"
423 " sc %1, %0 \n" 423 " sc %1, %0 \n"
424 " li %2, 1 \n" 424 " li %2, 1 \n"
425 "2: \n" 425 "2: .insn \n"
426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
427 "=&r" (ret) 427 "=&r" (ret)
428 : GCC_OFF_SMALL_ASM() (rw->lock) 428 : GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3e940dbe0262..78faf4292e90 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -386,17 +386,18 @@
386#define __NR_pkey_mprotect (__NR_Linux + 363) 386#define __NR_pkey_mprotect (__NR_Linux + 363)
387#define __NR_pkey_alloc (__NR_Linux + 364) 387#define __NR_pkey_alloc (__NR_Linux + 364)
388#define __NR_pkey_free (__NR_Linux + 365) 388#define __NR_pkey_free (__NR_Linux + 365)
389#define __NR_statx (__NR_Linux + 366)
389 390
390 391
391/* 392/*
392 * Offset of the last Linux o32 flavoured syscall 393 * Offset of the last Linux o32 flavoured syscall
393 */ 394 */
394#define __NR_Linux_syscalls 365 395#define __NR_Linux_syscalls 366
395 396
396#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 397#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
397 398
398#define __NR_O32_Linux 4000 399#define __NR_O32_Linux 4000
399#define __NR_O32_Linux_syscalls 365 400#define __NR_O32_Linux_syscalls 366
400 401
401#if _MIPS_SIM == _MIPS_SIM_ABI64 402#if _MIPS_SIM == _MIPS_SIM_ABI64
402 403
@@ -730,16 +731,17 @@
730#define __NR_pkey_mprotect (__NR_Linux + 323) 731#define __NR_pkey_mprotect (__NR_Linux + 323)
731#define __NR_pkey_alloc (__NR_Linux + 324) 732#define __NR_pkey_alloc (__NR_Linux + 324)
732#define __NR_pkey_free (__NR_Linux + 325) 733#define __NR_pkey_free (__NR_Linux + 325)
734#define __NR_statx (__NR_Linux + 326)
733 735
734/* 736/*
735 * Offset of the last Linux 64-bit flavoured syscall 737 * Offset of the last Linux 64-bit flavoured syscall
736 */ 738 */
737#define __NR_Linux_syscalls 325 739#define __NR_Linux_syscalls 326
738 740
739#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 741#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
740 742
741#define __NR_64_Linux 5000 743#define __NR_64_Linux 5000
742#define __NR_64_Linux_syscalls 325 744#define __NR_64_Linux_syscalls 326
743 745
744#if _MIPS_SIM == _MIPS_SIM_NABI32 746#if _MIPS_SIM == _MIPS_SIM_NABI32
745 747
@@ -1077,15 +1079,16 @@
1077#define __NR_pkey_mprotect (__NR_Linux + 327) 1079#define __NR_pkey_mprotect (__NR_Linux + 327)
1078#define __NR_pkey_alloc (__NR_Linux + 328) 1080#define __NR_pkey_alloc (__NR_Linux + 328)
1079#define __NR_pkey_free (__NR_Linux + 329) 1081#define __NR_pkey_free (__NR_Linux + 329)
1082#define __NR_statx (__NR_Linux + 330)
1080 1083
1081/* 1084/*
1082 * Offset of the last N32 flavoured syscall 1085 * Offset of the last N32 flavoured syscall
1083 */ 1086 */
1084#define __NR_Linux_syscalls 329 1087#define __NR_Linux_syscalls 330
1085 1088
1086#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1089#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1087 1090
1088#define __NR_N32_Linux 6000 1091#define __NR_N32_Linux 6000
1089#define __NR_N32_Linux_syscalls 329 1092#define __NR_N32_Linux_syscalls 330
1090 1093
1091#endif /* _UAPI_ASM_UNISTD_H */ 1094#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index bb5c5d34ba81..a670c0c11875 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE);
103 DEFINE(_THREAD_MASK, THREAD_MASK); 103 DEFINE(_THREAD_MASK, THREAD_MASK);
104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); 104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
105 DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
105 BLANK(); 106 BLANK();
106} 107}
107 108
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a607add..a00e87b0256d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
361 END(mips_cps_get_bootcfg) 361 END(mips_cps_get_bootcfg)
362 362
363LEAF(mips_cps_boot_vpes) 363LEAF(mips_cps_boot_vpes)
364 PTR_L ta2, COREBOOTCFG_VPEMASK(a0) 364 lw ta2, COREBOOTCFG_VPEMASK(a0)
365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) 365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
366 366
367#if defined(CONFIG_CPU_MIPSR6) 367#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 07718bb5fc9d..12422fd4af23 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
1824 } 1824 }
1825 1825
1826 decode_configs(c); 1826 decode_configs(c);
1827 c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; 1827 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
1829 break; 1829 break;
1830 default: 1830 default:
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7ec9612cb007..ae810da4d499 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
215 beq t0, t1, 2f 215 beq t0, t1, 2f
216 216
217 /* Switch to IRQ stack */ 217 /* Switch to IRQ stack */
218 li t1, _IRQ_STACK_SIZE 218 li t1, _IRQ_STACK_START
219 PTR_ADD sp, t0, t1 219 PTR_ADD sp, t0, t1
220 220
221 /* Save task's sp on IRQ stack so that unwinding can follow it */
222 LONG_S s1, 0(sp)
2212: 2232:
222 jal plat_irq_dispatch 224 jal plat_irq_dispatch
223 225
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
325 beq t0, t1, 2f 327 beq t0, t1, 2f
326 328
327 /* Switch to IRQ stack */ 329 /* Switch to IRQ stack */
328 li t1, _IRQ_STACK_SIZE 330 li t1, _IRQ_STACK_START
329 PTR_ADD sp, t0, t1 331 PTR_ADD sp, t0, t1
330 332
333 /* Save task's sp on IRQ stack so that unwinding can follow it */
334 LONG_S s1, 0(sp)
3312: 3352:
332 jalr v0 336 jalr v0
333 337
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
519 BUILD_HANDLER reserved reserved sti verbose /* others */ 523 BUILD_HANDLER reserved reserved sti verbose /* others */
520 524
521 .align 5 525 .align 5
522 LEAF(handle_ri_rdhwr_vivt) 526 LEAF(handle_ri_rdhwr_tlbp)
523 .set push 527 .set push
524 .set noat 528 .set noat
525 .set noreorder 529 .set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
538 .set pop 542 .set pop
539 bltz k1, handle_ri /* slow path */ 543 bltz k1, handle_ri /* slow path */
540 /* fall thru */ 544 /* fall thru */
541 END(handle_ri_rdhwr_vivt) 545 END(handle_ri_rdhwr_tlbp)
542 546
543 LEAF(handle_ri_rdhwr) 547 LEAF(handle_ri_rdhwr)
544 .set push 548 .set push
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fb6b6b650719..b68e10fc453d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
488 unsigned long pc, 488 unsigned long pc,
489 unsigned long *ra) 489 unsigned long *ra)
490{ 490{
491 unsigned long low, high, irq_stack_high;
491 struct mips_frame_info info; 492 struct mips_frame_info info;
492 unsigned long size, ofs; 493 unsigned long size, ofs;
494 struct pt_regs *regs;
493 int leaf; 495 int leaf;
494 extern void ret_from_irq(void);
495 extern void ret_from_exception(void);
496 496
497 if (!stack_page) 497 if (!stack_page)
498 return 0; 498 return 0;
499 499
500 /* 500 /*
501 * If we reached the bottom of interrupt context, 501 * IRQ stacks start at IRQ_STACK_START
502 * return saved pc in pt_regs. 502 * task stacks at THREAD_SIZE - 32
503 */ 503 */
504 if (pc == (unsigned long)ret_from_irq || 504 low = stack_page;
505 pc == (unsigned long)ret_from_exception) { 505 if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
506 struct pt_regs *regs; 506 high = stack_page + IRQ_STACK_START;
507 if (*sp >= stack_page && 507 irq_stack_high = high;
508 *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { 508 } else {
509 regs = (struct pt_regs *)*sp; 509 high = stack_page + THREAD_SIZE - 32;
510 pc = regs->cp0_epc; 510 irq_stack_high = 0;
511 if (!user_mode(regs) && __kernel_text_address(pc)) { 511 }
512 *sp = regs->regs[29]; 512
513 *ra = regs->regs[31]; 513 /*
514 return pc; 514 * If we reached the top of the interrupt stack, start unwinding
515 } 515 * the interrupted task stack.
516 */
517 if (unlikely(*sp == irq_stack_high)) {
518 unsigned long task_sp = *(unsigned long *)*sp;
519
520 /*
521 * Check that the pointer saved in the IRQ stack head points to
522 * something within the stack of the current task
523 */
524 if (!object_is_on_stack((void *)task_sp))
525 return 0;
526
527 /*
528 * Follow pointer to tasks kernel stack frame where interrupted
529 * state was saved.
530 */
531 regs = (struct pt_regs *)task_sp;
532 pc = regs->cp0_epc;
533 if (!user_mode(regs) && __kernel_text_address(pc)) {
534 *sp = regs->regs[29];
535 *ra = regs->regs[31];
536 return pc;
516 } 537 }
517 return 0; 538 return 0;
518 } 539 }
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
533 if (leaf < 0) 554 if (leaf < 0)
534 return 0; 555 return 0;
535 556
536 if (*sp < stack_page || 557 if (*sp < low || *sp + info.frame_size > high)
537 *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
538 return 0; 558 return 0;
539 559
540 if (leaf) 560 if (leaf)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 339601267265..6931fe722a0b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
456 &target->thread.fpu, 456 &target->thread.fpu,
457 0, sizeof(elf_fpregset_t)); 457 0, sizeof(elf_fpregset_t));
458 458
459 for (i = 0; i < NUM_FPU_REGS; i++) { 459 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
460 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
460 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 461 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
461 &fpr_val, i * sizeof(elf_fpreg_t), 462 &fpr_val, i * sizeof(elf_fpreg_t),
462 (i + 1) * sizeof(elf_fpreg_t)); 463 (i + 1) * sizeof(elf_fpreg_t));
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397eee86..80ed68b2c95e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
600 PTR sys_pkey_mprotect 600 PTR sys_pkey_mprotect
601 PTR sys_pkey_alloc 601 PTR sys_pkey_alloc
602 PTR sys_pkey_free /* 4365 */ 602 PTR sys_pkey_free /* 4365 */
603 PTR sys_statx
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96ee912..49765b44aa9b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
438 PTR sys_pkey_mprotect 438 PTR sys_pkey_mprotect
439 PTR sys_pkey_alloc 439 PTR sys_pkey_alloc
440 PTR sys_pkey_free /* 5325 */ 440 PTR sys_pkey_free /* 5325 */
441 PTR sys_statx
441 .size sys_call_table,.-sys_call_table 442 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba39a065..90bad2d1b2d3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
433 PTR sys_pkey_mprotect 433 PTR sys_pkey_mprotect
434 PTR sys_pkey_alloc 434 PTR sys_pkey_alloc
435 PTR sys_pkey_free 435 PTR sys_pkey_free
436 PTR sys_statx /* 6330 */
436 .size sysn32_call_table,.-sysn32_call_table 437 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042dd25f..2dd70bd104e1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
588 PTR sys_pkey_mprotect 588 PTR sys_pkey_mprotect
589 PTR sys_pkey_alloc 589 PTR sys_pkey_alloc
590 PTR sys_pkey_free /* 4365 */ 590 PTR sys_pkey_free /* 4365 */
591 PTR sys_statx
591 .size sys32_call_table,.-sys32_call_table 592 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c7d17cfb32f6..b49e7bf9f950 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
83extern asmlinkage void handle_sys(void); 83extern asmlinkage void handle_sys(void);
84extern asmlinkage void handle_bp(void); 84extern asmlinkage void handle_bp(void);
85extern asmlinkage void handle_ri(void); 85extern asmlinkage void handle_ri(void);
86extern asmlinkage void handle_ri_rdhwr_vivt(void); 86extern asmlinkage void handle_ri_rdhwr_tlbp(void);
87extern asmlinkage void handle_ri_rdhwr(void); 87extern asmlinkage void handle_ri_rdhwr(void);
88extern asmlinkage void handle_cpu(void); 88extern asmlinkage void handle_cpu(void);
89extern asmlinkage void handle_ov(void); 89extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
2408 2408
2409 set_except_vector(EXCCODE_SYS, handle_sys); 2409 set_except_vector(EXCCODE_SYS, handle_sys);
2410 set_except_vector(EXCCODE_BP, handle_bp); 2410 set_except_vector(EXCCODE_BP, handle_bp);
2411 set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : 2411
2412 (cpu_has_vtag_icache ? 2412 if (rdhwr_noopt)
2413 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2413 set_except_vector(EXCCODE_RI, handle_ri);
2414 else {
2415 if (cpu_has_vtag_icache)
2416 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417 else if (current_cpu_type() == CPU_LOONGSON3)
2418 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419 else
2420 set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421 }
2422
2414 set_except_vector(EXCCODE_CPU, handle_cpu); 2423 set_except_vector(EXCCODE_CPU, handle_cpu);
2415 set_except_vector(EXCCODE_OV, handle_ov); 2424 set_except_vector(EXCCODE_OV, handle_ov);
2416 set_except_vector(EXCCODE_TR, handle_tr); 2425 set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3c3aa05891dd..95bec460b651 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
467 467
468 if (!np_xbar) 468 if (!np_xbar)
469 panic("Failed to load xbar nodes from devicetree"); 469 panic("Failed to load xbar nodes from devicetree");
470 if (of_address_to_resource(np_pmu, 0, &res_xbar)) 470 if (of_address_to_resource(np_xbar, 0, &res_xbar))
471 panic("Failed to get xbar resources"); 471 panic("Failed to get xbar resources");
472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), 472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
473 res_xbar.name)) 473 res_xbar.name))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e7f798d55fbc..3fe99cb271a9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; 1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1563 1563
1564 c->vcache.waybit = 0; 1564 c->vcache.waybit = 0;
1565 c->vcache.waysize = vcache_size / c->vcache.ways;
1565 1566
1566 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", 1567 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1567 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); 1568 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
1664 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ 1665 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
1665 scache_size *= 4; 1666 scache_size *= 4;
1666 c->scache.waybit = 0; 1667 c->scache.waybit = 0;
1668 c->scache.waysize = scache_size / c->scache.ways;
1667 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1669 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1668 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1670 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1669 if (scache_size) 1671 if (scache_size)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
761 struct uasm_label **l, 761 struct uasm_label **l,
762 unsigned int pte, 762 unsigned int pte,
763 unsigned int ptr) 763 unsigned int ptr,
764 unsigned int flush)
764{ 765{
765#ifdef CONFIG_SMP 766#ifdef CONFIG_SMP
766 UASM_i_SC(p, pte, 0, ptr); 767 UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
769#else 770#else
770 UASM_i_SW(p, pte, 0, ptr); 771 UASM_i_SW(p, pte, 0, ptr);
771#endif 772#endif
773 if (cpu_has_ftlb && flush) {
774 BUG_ON(!cpu_has_tlbinv);
775
776 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
777 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
778 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
779 build_tlb_write_entry(p, l, r, tlb_indexed);
780
781 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
782 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
783 build_huge_update_entries(p, pte, ptr);
784 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
785
786 return;
787 }
788
772 build_huge_update_entries(p, pte, ptr); 789 build_huge_update_entries(p, pte, ptr);
773 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 790 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
774} 791}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
2199 uasm_l_tlbl_goaround2(&l, p); 2216 uasm_l_tlbl_goaround2(&l, p);
2200 } 2217 }
2201 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2218 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2202 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2219 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2203#endif 2220#endif
2204 2221
2205 uasm_l_nopage_tlbl(&l, p); 2222 uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
2254 build_tlb_probe_entry(&p); 2271 build_tlb_probe_entry(&p);
2255 uasm_i_ori(&p, wr.r1, wr.r1, 2272 uasm_i_ori(&p, wr.r1, wr.r1,
2256 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2273 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2257 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2274 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2258#endif 2275#endif
2259 2276
2260 uasm_l_nopage_tlbs(&l, p); 2277 uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
2310 build_tlb_probe_entry(&p); 2327 build_tlb_probe_entry(&p);
2311 uasm_i_ori(&p, wr.r1, wr.r1, 2328 uasm_i_ori(&p, wr.r1, wr.r1,
2312 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2329 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2313 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2330 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2314#endif 2331#endif
2315 2332
2316 uasm_l_nopage_tlbm(&l, p); 2333 uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index c4ffd43d3996..48ce701557a4 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; 35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; 36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; 37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; 38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
39static struct rt2880_pmx_func pci_func[] = { 39static struct rt2880_pmx_func pci_func[] = {
40 FUNC("pci-dev", 0, 40, 32), 40 FUNC("pci-dev", 0, 40, 32),
41 FUNC("pci-host2", 1, 40, 32), 41 FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
43 FUNC("pci-fnc", 3, 40, 32) 43 FUNC("pci-fnc", 3, 40, 32)
44}; 44};
45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; 45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; 46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
47 47
48static struct rt2880_pmx_group rt3883_pinmux_data[] = { 48static struct rt2880_pmx_group rt3883_pinmux_data[] = {
49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), 49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c5426157b..3901b80d4420 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
48 return alloc_bootmem_align(size, align); 48 return alloc_bootmem_align(size, align);
49} 49}
50 50
51int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
52 bool nomap)
53{
54 reserve_bootmem(base, size, BOOTMEM_DEFAULT);
55 return 0;
56}
57
51void __init early_init_devtree(void *params) 58void __init early_init_devtree(void *params)
52{ 59{
53 __be32 *dtb = (u32 *)__dtb_start; 60 __be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6e57ffa5db27..6044d9be28b4 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
201 } 201 }
202#endif /* CONFIG_BLK_DEV_INITRD */ 202#endif /* CONFIG_BLK_DEV_INITRD */
203 203
204 early_init_fdt_reserve_self();
205 early_init_fdt_scan_reserved_mem();
206
204 unflatten_and_copy_device_tree(); 207 unflatten_and_copy_device_tree();
205 208
206 setup_cpuinfo(); 209 setup_cpuinfo();
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index edfbf9d6a6dd..cbd4f4af8108 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -39,10 +39,10 @@
39#define get_user __get_user 39#define get_user __get_user
40 40
41#if !defined(CONFIG_64BIT) 41#if !defined(CONFIG_64BIT)
42#define LDD_USER(ptr) __get_user_asm64(ptr) 42#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
43#define STD_USER(x, ptr) __put_user_asm64(x, ptr) 43#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
44#else 44#else
45#define LDD_USER(ptr) __get_user_asm("ldd", ptr) 45#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
46#define STD_USER(x, ptr) __put_user_asm("std", x, ptr) 46#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
47#endif 47#endif
48 48
@@ -65,6 +65,15 @@ struct exception_table_entry {
65 ".previous\n" 65 ".previous\n"
66 66
67/* 67/*
68 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
69 * (with lowest bit set) for which the fault handler in fixup_exception() will
70 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
71 * register in case of a read fault in get_user().
72 */
73#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
74 ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
75
76/*
68 * The page fault handler stores, in a per-cpu area, the following information 77 * The page fault handler stores, in a per-cpu area, the following information
69 * if a fixup routine is available. 78 * if a fixup routine is available.
70 */ 79 */
@@ -88,92 +97,116 @@ struct exception_data {
88 " mtsp %0,%%sr2\n\t" \ 97 " mtsp %0,%%sr2\n\t" \
89 : : "r"(get_fs()) : ) 98 : : "r"(get_fs()) : )
90 99
91#define __get_user(x, ptr) \ 100#define __get_user_internal(val, ptr) \
92({ \ 101({ \
93 register long __gu_err __asm__ ("r8") = 0; \ 102 register long __gu_err __asm__ ("r8") = 0; \
94 register long __gu_val __asm__ ("r9") = 0; \ 103 \
95 \ 104 switch (sizeof(*(ptr))) { \
96 load_sr2(); \ 105 case 1: __get_user_asm(val, "ldb", ptr); break; \
97 switch (sizeof(*(ptr))) { \ 106 case 2: __get_user_asm(val, "ldh", ptr); break; \
98 case 1: __get_user_asm("ldb", ptr); break; \ 107 case 4: __get_user_asm(val, "ldw", ptr); break; \
99 case 2: __get_user_asm("ldh", ptr); break; \ 108 case 8: LDD_USER(val, ptr); break; \
100 case 4: __get_user_asm("ldw", ptr); break; \ 109 default: BUILD_BUG(); \
101 case 8: LDD_USER(ptr); break; \ 110 } \
102 default: BUILD_BUG(); break; \ 111 \
103 } \ 112 __gu_err; \
104 \
105 (x) = (__force __typeof__(*(ptr))) __gu_val; \
106 __gu_err; \
107}) 113})
108 114
109#define __get_user_asm(ldx, ptr) \ 115#define __get_user(val, ptr) \
110 __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ 116({ \
111 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 117 load_sr2(); \
118 __get_user_internal(val, ptr); \
119})
120
121#define __get_user_asm(val, ldx, ptr) \
122{ \
123 register long __gu_val; \
124 \
125 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
126 "9:\n" \
127 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
112 : "=r"(__gu_val), "=r"(__gu_err) \ 128 : "=r"(__gu_val), "=r"(__gu_err) \
113 : "r"(ptr), "1"(__gu_err) \ 129 : "r"(ptr), "1"(__gu_err)); \
114 : "r1"); 130 \
131 (val) = (__force __typeof__(*(ptr))) __gu_val; \
132}
115 133
116#if !defined(CONFIG_64BIT) 134#if !defined(CONFIG_64BIT)
117 135
118#define __get_user_asm64(ptr) \ 136#define __get_user_asm64(val, ptr) \
119 __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ 137{ \
120 "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ 138 union { \
121 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ 139 unsigned long long l; \
122 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ 140 __typeof__(*(ptr)) t; \
123 : "=r"(__gu_val), "=r"(__gu_err) \ 141 } __gu_tmp; \
124 : "r"(ptr), "1"(__gu_err) \ 142 \
125 : "r1"); 143 __asm__(" copy %%r0,%R0\n" \
144 "1: ldw 0(%%sr2,%2),%0\n" \
145 "2: ldw 4(%%sr2,%2),%R0\n" \
146 "9:\n" \
147 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
148 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
149 : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
150 : "r"(ptr), "1"(__gu_err)); \
151 \
152 (val) = __gu_tmp.t; \
153}
126 154
127#endif /* !defined(CONFIG_64BIT) */ 155#endif /* !defined(CONFIG_64BIT) */
128 156
129 157
130#define __put_user(x, ptr) \ 158#define __put_user_internal(x, ptr) \
131({ \ 159({ \
132 register long __pu_err __asm__ ("r8") = 0; \ 160 register long __pu_err __asm__ ("r8") = 0; \
133 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ 161 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
134 \ 162 \
135 load_sr2(); \
136 switch (sizeof(*(ptr))) { \ 163 switch (sizeof(*(ptr))) { \
137 case 1: __put_user_asm("stb", __x, ptr); break; \ 164 case 1: __put_user_asm("stb", __x, ptr); break; \
138 case 2: __put_user_asm("sth", __x, ptr); break; \ 165 case 2: __put_user_asm("sth", __x, ptr); break; \
139 case 4: __put_user_asm("stw", __x, ptr); break; \ 166 case 4: __put_user_asm("stw", __x, ptr); break; \
140 case 8: STD_USER(__x, ptr); break; \ 167 case 8: STD_USER(__x, ptr); break; \
141 default: BUILD_BUG(); break; \ 168 default: BUILD_BUG(); \
142 } \ 169 } \
143 \ 170 \
144 __pu_err; \ 171 __pu_err; \
145}) 172})
146 173
174#define __put_user(x, ptr) \
175({ \
176 load_sr2(); \
177 __put_user_internal(x, ptr); \
178})
179
180
147/* 181/*
148 * The "__put_user/kernel_asm()" macros tell gcc they read from memory 182 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
149 * instead of writing. This is because they do not write to any memory 183 * instead of writing. This is because they do not write to any memory
150 * gcc knows about, so there are no aliasing issues. These macros must 184 * gcc knows about, so there are no aliasing issues. These macros must
151 * also be aware that "fixup_put_user_skip_[12]" are executed in the 185 * also be aware that fixups are executed in the context of the fault,
152 * context of the fault, and any registers used there must be listed 186 * and any registers used there must be listed as clobbers.
153 * as clobbers. In this case only "r1" is used by the current routines. 187 * r8 is already listed as err.
154 * r8/r9 are already listed as err/val.
155 */ 188 */
156 189
157#define __put_user_asm(stx, x, ptr) \ 190#define __put_user_asm(stx, x, ptr) \
158 __asm__ __volatile__ ( \ 191 __asm__ __volatile__ ( \
159 "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ 192 "1: " stx " %2,0(%%sr2,%1)\n" \
160 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ 193 "9:\n" \
194 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
161 : "=r"(__pu_err) \ 195 : "=r"(__pu_err) \
162 : "r"(ptr), "r"(x), "0"(__pu_err) \ 196 : "r"(ptr), "r"(x), "0"(__pu_err))
163 : "r1")
164 197
165 198
166#if !defined(CONFIG_64BIT) 199#if !defined(CONFIG_64BIT)
167 200
168#define __put_user_asm64(__val, ptr) do { \ 201#define __put_user_asm64(__val, ptr) do { \
169 __asm__ __volatile__ ( \ 202 __asm__ __volatile__ ( \
170 "\n1:\tstw %2,0(%%sr2,%1)" \ 203 "1: stw %2,0(%%sr2,%1)\n" \
171 "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ 204 "2: stw %R2,4(%%sr2,%1)\n" \
172 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ 205 "9:\n" \
173 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ 206 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
207 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
174 : "=r"(__pu_err) \ 208 : "=r"(__pu_err) \
175 : "r"(ptr), "r"(__val), "0"(__pu_err) \ 209 : "r"(ptr), "r"(__val), "0"(__pu_err)); \
176 : "r1"); \
177} while (0) 210} while (0)
178 211
179#endif /* !defined(CONFIG_64BIT) */ 212#endif /* !defined(CONFIG_64BIT) */
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7484b3d11e0d..c6d6272a934f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
47EXPORT_SYMBOL(lclear_user); 47EXPORT_SYMBOL(lclear_user);
48EXPORT_SYMBOL(lstrnlen_user); 48EXPORT_SYMBOL(lstrnlen_user);
49 49
50/* Global fixups - defined as int to avoid creation of function pointers */
51extern int fixup_get_user_skip_1;
52extern int fixup_get_user_skip_2;
53extern int fixup_put_user_skip_1;
54extern int fixup_put_user_skip_2;
55EXPORT_SYMBOL(fixup_get_user_skip_1);
56EXPORT_SYMBOL(fixup_get_user_skip_2);
57EXPORT_SYMBOL(fixup_put_user_skip_1);
58EXPORT_SYMBOL(fixup_put_user_skip_2);
59
60#ifndef CONFIG_64BIT 50#ifndef CONFIG_64BIT
61/* Needed so insmod can set dp value */ 51/* Needed so insmod can set dp value */
62extern int $global$; 52extern int $global$;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b76f503eee4a..4516a5b53f38 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -143,6 +143,8 @@ void machine_power_off(void)
143 printk(KERN_EMERG "System shut down completed.\n" 143 printk(KERN_EMERG "System shut down completed.\n"
144 "Please power this system off now."); 144 "Please power this system off now.");
145 145
146 /* prevent soft lockup/stalled CPU messages for endless loop. */
147 rcu_sysrq_start();
146 for (;;); 148 for (;;);
147} 149}
148 150
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8d839a..f2dac4d73b1b 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for parisc-specific library files 2# Makefile for parisc-specific library files
3# 3#
4 4
5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ 5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
6 ucmpdi2.o delay.o 6 ucmpdi2.o delay.o
7 7
8obj-y := iomap.o 8obj-y := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f22c7a6..000000000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 * Fixup routines for kernel exception handling.
21 */
22#include <asm/asm-offsets.h>
23#include <asm/assembly.h>
24#include <asm/errno.h>
25#include <linux/linkage.h>
26
27#ifdef CONFIG_SMP
28 .macro get_fault_ip t1 t2
29 loadgp
30 addil LT%__per_cpu_offset,%r27
31 LDREG RT%__per_cpu_offset(%r1),\t1
32 /* t2 = smp_processor_id() */
33 mfctl 30,\t2
34 ldw TI_CPU(\t2),\t2
35#ifdef CONFIG_64BIT
36 extrd,u \t2,63,32,\t2
37#endif
38 /* t2 = &__per_cpu_offset[smp_processor_id()]; */
39 LDREGX \t2(\t1),\t2
40 addil LT%exception_data,%r27
41 LDREG RT%exception_data(%r1),\t1
42 /* t1 = this_cpu_ptr(&exception_data) */
43 add,l \t1,\t2,\t1
44 /* %r27 = t1->fault_gp - restore gp */
45 LDREG EXCDATA_GP(\t1), %r27
46 /* t1 = t1->fault_ip */
47 LDREG EXCDATA_IP(\t1), \t1
48 .endm
49#else
50 .macro get_fault_ip t1 t2
51 loadgp
52 /* t1 = this_cpu_ptr(&exception_data) */
53 addil LT%exception_data,%r27
54 LDREG RT%exception_data(%r1),\t2
55 /* %r27 = t2->fault_gp - restore gp */
56 LDREG EXCDATA_GP(\t2), %r27
57 /* t1 = t2->fault_ip */
58 LDREG EXCDATA_IP(\t2), \t1
59 .endm
60#endif
61
62 .level LEVEL
63
64 .text
65 .section .fixup, "ax"
66
67 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
68ENTRY_CFI(fixup_get_user_skip_1)
69 get_fault_ip %r1,%r8
70 ldo 4(%r1), %r1
71 ldi -EFAULT, %r8
72 bv %r0(%r1)
73 copy %r0, %r9
74ENDPROC_CFI(fixup_get_user_skip_1)
75
76ENTRY_CFI(fixup_get_user_skip_2)
77 get_fault_ip %r1,%r8
78 ldo 8(%r1), %r1
79 ldi -EFAULT, %r8
80 bv %r0(%r1)
81 copy %r0, %r9
82ENDPROC_CFI(fixup_get_user_skip_2)
83
84 /* put_user() fixups, store -EFAULT in r8 */
85ENTRY_CFI(fixup_put_user_skip_1)
86 get_fault_ip %r1,%r8
87 ldo 4(%r1), %r1
88 bv %r0(%r1)
89 ldi -EFAULT, %r8
90ENDPROC_CFI(fixup_put_user_skip_1)
91
92ENTRY_CFI(fixup_put_user_skip_2)
93 get_fault_ip %r1,%r8
94 ldo 8(%r1), %r1
95 bv %r0(%r1)
96 ldi -EFAULT, %r8
97ENDPROC_CFI(fixup_put_user_skip_2)
98
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de6b5df..85c28bb80fb7 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> 5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr> 6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
8 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
9 * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
8 * 10 *
9 * 11 *
10 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,321 @@ ENDPROC_CFI(lstrnlen_user)
132 134
133 .procend 135 .procend
134 136
137
138
139/*
140 * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
141 *
142 * Inputs:
143 * - sr1 already contains space of source region
144 * - sr2 already contains space of destination region
145 *
146 * Returns:
147 * - number of bytes that could not be copied.
148 * On success, this will be zero.
149 *
150 * This code is based on a C-implementation of a copy routine written by
151 * Randolph Chung, which in turn was derived from the glibc.
152 *
153 * Several strategies are tried to try to get the best performance for various
154 * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
155 * at a time using general registers. Unaligned copies are handled either by
156 * aligning the destination and then using shift-and-write method, or in a few
157 * cases by falling back to a byte-at-a-time copy.
158 *
159 * Testing with various alignments and buffer sizes shows that this code is
160 * often >10x faster than a simple byte-at-a-time copy, even for strangely
161 * aligned operands. It is interesting to note that the glibc version of memcpy
162 * (written in C) is actually quite fast already. This routine is able to beat
163 * it by 30-40% for aligned copies because of the loop unrolling, but in some
164 * cases the glibc version is still slightly faster. This lends more
165 * credibility that gcc can generate very good code as long as we are careful.
166 *
167 * Possible optimizations:
168 * - add cache prefetching
169 * - try not to use the post-increment address modifiers; they may create
170 * additional interlocks. Assumption is that those were only efficient on old
171 * machines (pre PA8000 processors)
172 */
173
174 dst = arg0
175 src = arg1
176 len = arg2
177 end = arg3
178 t1 = r19
179 t2 = r20
180 t3 = r21
181 t4 = r22
182 srcspc = sr1
183 dstspc = sr2
184
185 t0 = r1
186 a1 = t1
187 a2 = t2
188 a3 = t3
189 a0 = t4
190
191 save_src = ret0
192 save_dst = ret1
193 save_len = r31
194
195ENTRY_CFI(pa_memcpy)
196 .proc
197 .callinfo NO_CALLS
198 .entry
199
200 /* Last destination address */
201 add dst,len,end
202
203 /* short copy with less than 16 bytes? */
204 cmpib,COND(>>=),n 15,len,.Lbyte_loop
205
206 /* same alignment? */
207 xor src,dst,t0
208 extru t0,31,2,t1
209 cmpib,<>,n 0,t1,.Lunaligned_copy
210
211#ifdef CONFIG_64BIT
212 /* only do 64-bit copies if we can get aligned. */
213 extru t0,31,3,t1
214 cmpib,<>,n 0,t1,.Lalign_loop32
215
216 /* loop until we are 64-bit aligned */
217.Lalign_loop64:
218 extru dst,31,3,t1
219 cmpib,=,n 0,t1,.Lcopy_loop_16_start
22020: ldb,ma 1(srcspc,src),t1
22121: stb,ma t1,1(dstspc,dst)
222 b .Lalign_loop64
223 ldo -1(len),len
224
225 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
226 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
227
228.Lcopy_loop_16_start:
229 ldi 31,t0
230.Lcopy_loop_16:
231 cmpb,COND(>>=),n t0,len,.Lword_loop
232
23310: ldd 0(srcspc,src),t1
23411: ldd 8(srcspc,src),t2
235 ldo 16(src),src
23612: std,ma t1,8(dstspc,dst)
23713: std,ma t2,8(dstspc,dst)
23814: ldd 0(srcspc,src),t1
23915: ldd 8(srcspc,src),t2
240 ldo 16(src),src
24116: std,ma t1,8(dstspc,dst)
24217: std,ma t2,8(dstspc,dst)
243
244 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
245 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
246 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
247 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
248 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
249 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
250 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
251 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
252
253 b .Lcopy_loop_16
254 ldo -32(len),len
255
256.Lword_loop:
257 cmpib,COND(>>=),n 3,len,.Lbyte_loop
25820: ldw,ma 4(srcspc,src),t1
25921: stw,ma t1,4(dstspc,dst)
260 b .Lword_loop
261 ldo -4(len),len
262
263 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
264 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
265
266#endif /* CONFIG_64BIT */
267
268 /* loop until we are 32-bit aligned */
269.Lalign_loop32:
270 extru dst,31,2,t1
271 cmpib,=,n 0,t1,.Lcopy_loop_8
27220: ldb,ma 1(srcspc,src),t1
27321: stb,ma t1,1(dstspc,dst)
274 b .Lalign_loop32
275 ldo -1(len),len
276
277 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
278 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
279
280
281.Lcopy_loop_8:
282 cmpib,COND(>>=),n 15,len,.Lbyte_loop
283
28410: ldw 0(srcspc,src),t1
28511: ldw 4(srcspc,src),t2
28612: stw,ma t1,4(dstspc,dst)
28713: stw,ma t2,4(dstspc,dst)
28814: ldw 8(srcspc,src),t1
28915: ldw 12(srcspc,src),t2
290 ldo 16(src),src
29116: stw,ma t1,4(dstspc,dst)
29217: stw,ma t2,4(dstspc,dst)
293
294 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
295 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
296 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
297 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
298 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
299 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
300 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
301 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
302
303 b .Lcopy_loop_8
304 ldo -16(len),len
305
306.Lbyte_loop:
307 cmpclr,COND(<>) len,%r0,%r0
308 b,n .Lcopy_done
30920: ldb 0(srcspc,src),t1
310 ldo 1(src),src
31121: stb,ma t1,1(dstspc,dst)
312 b .Lbyte_loop
313 ldo -1(len),len
314
315 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
316 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
317
318.Lcopy_done:
319 bv %r0(%r2)
320 sub end,dst,ret0
321
322
323 /* src and dst are not aligned the same way. */
324 /* need to go the hard way */
325.Lunaligned_copy:
326 /* align until dst is 32bit-word-aligned */
327 extru dst,31,2,t1
328 cmpib,=,n 0,t1,.Lcopy_dstaligned
32920: ldb 0(srcspc,src),t1
330 ldo 1(src),src
33121: stb,ma t1,1(dstspc,dst)
332 b .Lunaligned_copy
333 ldo -1(len),len
334
335 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
336 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
337
338.Lcopy_dstaligned:
339
340 /* store src, dst and len in safe place */
341 copy src,save_src
342 copy dst,save_dst
343 copy len,save_len
344
345 /* len now needs give number of words to copy */
346 SHRREG len,2,len
347
348 /*
349 * Copy from a not-aligned src to an aligned dst using shifts.
350 * Handles 4 words per loop.
351 */
352
353 depw,z src,28,2,t0
354 subi 32,t0,t0
355 mtsar t0
356 extru len,31,2,t0
357 cmpib,= 2,t0,.Lcase2
358 /* Make src aligned by rounding it down. */
359 depi 0,31,2,src
360
361 cmpiclr,<> 3,t0,%r0
362 b,n .Lcase3
363 cmpiclr,<> 1,t0,%r0
364 b,n .Lcase1
365.Lcase0:
366 cmpb,COND(=) %r0,len,.Lcda_finish
367 nop
368
3691: ldw,ma 4(srcspc,src), a3
370 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3711: ldw,ma 4(srcspc,src), a0
372 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
373 b,n .Ldo3
374.Lcase1:
3751: ldw,ma 4(srcspc,src), a2
376 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3771: ldw,ma 4(srcspc,src), a3
378 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
379 ldo -1(len),len
380 cmpb,COND(=),n %r0,len,.Ldo0
381.Ldo4:
3821: ldw,ma 4(srcspc,src), a0
383 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
384 shrpw a2, a3, %sar, t0
3851: stw,ma t0, 4(dstspc,dst)
386 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
387.Ldo3:
3881: ldw,ma 4(srcspc,src), a1
389 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
390 shrpw a3, a0, %sar, t0
3911: stw,ma t0, 4(dstspc,dst)
392 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
393.Ldo2:
3941: ldw,ma 4(srcspc,src), a2
395 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
396 shrpw a0, a1, %sar, t0
3971: stw,ma t0, 4(dstspc,dst)
398 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
399.Ldo1:
4001: ldw,ma 4(srcspc,src), a3
401 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
402 shrpw a1, a2, %sar, t0
4031: stw,ma t0, 4(dstspc,dst)
404 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
405 ldo -4(len),len
406 cmpb,COND(<>) %r0,len,.Ldo4
407 nop
408.Ldo0:
409 shrpw a2, a3, %sar, t0
4101: stw,ma t0, 4(dstspc,dst)
411 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
412
413.Lcda_rdfault:
414.Lcda_finish:
415 /* calculate new src, dst and len and jump to byte-copy loop */
416 sub dst,save_dst,t0
417 add save_src,t0,src
418 b .Lbyte_loop
419 sub save_len,t0,len
420
421.Lcase3:
4221: ldw,ma 4(srcspc,src), a0
423 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4241: ldw,ma 4(srcspc,src), a1
425 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
426 b .Ldo2
427 ldo 1(len),len
428.Lcase2:
4291: ldw,ma 4(srcspc,src), a1
430 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4311: ldw,ma 4(srcspc,src), a2
432 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
433 b .Ldo1
434 ldo 2(len),len
435
436
437 /* fault exception fixup handlers: */
438#ifdef CONFIG_64BIT
439.Lcopy16_fault:
440 b .Lcopy_done
44110: std,ma t1,8(dstspc,dst)
442 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
443#endif
444
445.Lcopy8_fault:
446 b .Lcopy_done
44710: stw,ma t1,4(dstspc,dst)
448 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
449
450 .exit
451ENDPROC_CFI(pa_memcpy)
452 .procend
453
135 .end 454 .end
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10ed974..b3d47ec1d80a 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
2 * Optimized memory copy routines. 2 * Optimized memory copy routines.
3 * 3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 * Copyright (C) 2013 Helge Deller <deller@gmx.de> 5 * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
21 * Portions derived from the GNU C Library 21 * Portions derived from the GNU C Library
22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. 22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
23 * 23 *
24 * Several strategies are tried to try to get the best performance for various
25 * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
26 * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
27 * general registers. Unaligned copies are handled either by aligning the
28 * destination and then using shift-and-write method, or in a few cases by
29 * falling back to a byte-at-a-time copy.
30 *
31 * I chose to implement this in C because it is easier to maintain and debug,
32 * and in my experiments it appears that the C code generated by gcc (3.3/3.4
33 * at the time of writing) is fairly optimal. Unfortunately some of the
34 * semantics of the copy routine (exception handling) is difficult to express
35 * in C, so we have to play some tricks to get it to work.
36 *
37 * All the loads and stores are done via explicit asm() code in order to use
38 * the right space registers.
39 *
40 * Testing with various alignments and buffer sizes shows that this code is
41 * often >10x faster than a simple byte-at-a-time copy, even for strangely
42 * aligned operands. It is interesting to note that the glibc version
43 * of memcpy (written in C) is actually quite fast already. This routine is
44 * able to beat it by 30-40% for aligned copies because of the loop unrolling,
45 * but in some cases the glibc version is still slightly faster. This lends
46 * more credibility that gcc can generate very good code as long as we are
47 * careful.
48 *
49 * TODO:
50 * - cache prefetching needs more experimentation to get optimal settings
51 * - try not to use the post-increment address modifiers; they create additional
52 * interlocks
53 * - replace byte-copy loops with stybs sequences
54 */ 24 */
55 25
56#ifdef __KERNEL__
57#include <linux/module.h> 26#include <linux/module.h>
58#include <linux/compiler.h> 27#include <linux/compiler.h>
59#include <linux/uaccess.h> 28#include <linux/uaccess.h>
60#define s_space "%%sr1"
61#define d_space "%%sr2"
62#else
63#include "memcpy.h"
64#define s_space "%%sr0"
65#define d_space "%%sr0"
66#define pa_memcpy new2_copy
67#endif
68 29
69DECLARE_PER_CPU(struct exception_data, exception_data); 30DECLARE_PER_CPU(struct exception_data, exception_data);
70 31
71#define preserve_branch(label) do { \
72 volatile int dummy = 0; \
73 /* The following branch is never taken, it's just here to */ \
74 /* prevent gcc from optimizing away our exception code. */ \
75 if (unlikely(dummy != dummy)) \
76 goto label; \
77} while (0)
78
79#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 32#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
80#define get_kernel_space() (0) 33#define get_kernel_space() (0)
81 34
82#define MERGE(w0, sh_1, w1, sh_2) ({ \
83 unsigned int _r; \
84 asm volatile ( \
85 "mtsar %3\n" \
86 "shrpw %1, %2, %%sar, %0\n" \
87 : "=r"(_r) \
88 : "r"(w0), "r"(w1), "r"(sh_2) \
89 ); \
90 _r; \
91})
92#define THRESHOLD 16
93
94#ifdef DEBUG_MEMCPY
95#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
96#else
97#define DPRINTF(fmt, args...)
98#endif
99
100#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
101 __asm__ __volatile__ ( \
102 "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
103 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
104 : _tt(_t), "+r"(_a) \
105 : \
106 : "r8")
107
108#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
109 __asm__ __volatile__ ( \
110 "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
112 : "+r"(_a) \
113 : _tt(_t) \
114 : "r8")
115
116#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
117#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
118#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
119#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
120#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
121#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
122
123#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
124 __asm__ __volatile__ ( \
125 "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
126 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
127 : _tt(_t) \
128 : "r"(_a) \
129 : "r8")
130
131#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
132 __asm__ __volatile__ ( \
133 "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
134 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
135 : \
136 : _tt(_t), "r"(_a) \
137 : "r8")
138
139#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
140#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
141
142#ifdef CONFIG_PREFETCH
143static inline void prefetch_src(const void *addr)
144{
145 __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
146}
147
148static inline void prefetch_dst(const void *addr)
149{
150 __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
151}
152#else
153#define prefetch_src(addr) do { } while(0)
154#define prefetch_dst(addr) do { } while(0)
155#endif
156
157#define PA_MEMCPY_OK 0
158#define PA_MEMCPY_LOAD_ERROR 1
159#define PA_MEMCPY_STORE_ERROR 2
160
161/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
162 * per loop. This code is derived from glibc.
163 */
164static noinline unsigned long copy_dstaligned(unsigned long dst,
165					unsigned long src, unsigned long len)
166{
167	/* gcc complains that a2 and a3 may be uninitialized, but actually
168	 * they cannot be. Initialize a2/a3 to shut gcc up.
169	 */
170	register unsigned int a0, a1, a2 = 0, a3 = 0;
171	int sh_1, sh_2;
172
173	/* prefetch_src((const void *)src); */
174
175	/* Calculate how to shift a word read at the memory operation
176	   aligned srcp to make it aligned for copy. */
177	sh_1 = 8 * (src % sizeof(unsigned int));
178	sh_2 = 8 * sizeof(unsigned int) - sh_1;
179
180	/* Make src aligned by rounding it down. */
181	src &= -sizeof(unsigned int);
182
	/*
	 * Duff's-device style preamble: pre-load one or two source words and
	 * bias src/dst/len so that execution enters the unrolled loop below
	 * at the label matching the residue (len % 4).  NOTE: len here is a
	 * count of words, not bytes (see the caller in pa_memcpy_internal).
	 */
183	switch (len % 4)
184	{
185	case 2:
186		/* a1 = ((unsigned int *) src)[0];
187		   a2 = ((unsigned int *) src)[1]; */
188		ldw(s_space, 0, src, a1, cda_ldw_exc);
189		ldw(s_space, 4, src, a2, cda_ldw_exc);
190		src -= 1 * sizeof(unsigned int);
191		dst -= 3 * sizeof(unsigned int);
192		len += 2;
193		goto do1;
194	case 3:
195		/* a0 = ((unsigned int *) src)[0];
196		   a1 = ((unsigned int *) src)[1]; */
197		ldw(s_space, 0, src, a0, cda_ldw_exc);
198		ldw(s_space, 4, src, a1, cda_ldw_exc);
199		src -= 0 * sizeof(unsigned int);
200		dst -= 2 * sizeof(unsigned int);
201		len += 1;
202		goto do2;
203	case 0:
204		if (len == 0)
205			return PA_MEMCPY_OK;
206		/* a3 = ((unsigned int *) src)[0];
207		   a0 = ((unsigned int *) src)[1]; */
208		ldw(s_space, 0, src, a3, cda_ldw_exc);
209		ldw(s_space, 4, src, a0, cda_ldw_exc);
210		src -=-1 * sizeof(unsigned int);
211		dst -= 1 * sizeof(unsigned int);
212		len += 0;
213		goto do3;
214	case 1:
215		/* a2 = ((unsigned int *) src)[0];
216		   a3 = ((unsigned int *) src)[1]; */
217		ldw(s_space, 0, src, a2, cda_ldw_exc);
218		ldw(s_space, 4, src, a3, cda_ldw_exc);
219		src -=-2 * sizeof(unsigned int);
220		dst -= 0 * sizeof(unsigned int);
221		len -= 1;
222		if (len == 0)
223			goto do0;
224		goto do4;			/* No-op. */
225	}
226
	/*
	 * Main loop: copies 4 destination words per iteration.  Each stw()
	 * writes the MERGE of two consecutive source words using the
	 * sh_1/sh_2 shift amounts computed above.
	 */
227	do
228	{
229		/* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
230do4:
231		/* a0 = ((unsigned int *) src)[0]; */
232		ldw(s_space, 0, src, a0, cda_ldw_exc);
233		/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
234		stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
235do3:
236		/* a1 = ((unsigned int *) src)[1]; */
237		ldw(s_space, 4, src, a1, cda_ldw_exc);
238		/* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
239		stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
240do2:
241		/* a2 = ((unsigned int *) src)[2]; */
242		ldw(s_space, 8, src, a2, cda_ldw_exc);
243		/* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
244		stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
245do1:
246		/* a3 = ((unsigned int *) src)[3]; */
247		ldw(s_space, 12, src, a3, cda_ldw_exc);
248		/* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
249		stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
250
251		src += 4 * sizeof(unsigned int);
252		dst += 4 * sizeof(unsigned int);
253		len -= 4;
254	}
255	while (len != 0);
256
	/* Store the last merged word still pending when the loop (or the
	 * "case 1" preamble) finished. */
257do0:
258	/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
259	stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
260
	/* preserve_branch() presumably keeps the handlers below from being
	 * optimized away as unreachable -- they are entered only through the
	 * inline-asm exception labels.  NOTE(review): confirm macro semantics
	 * against its definition earlier in this file. */
261	preserve_branch(handle_load_error);
262	preserve_branch(handle_store_error);
263
264	return PA_MEMCPY_OK;
265
/* A faulting ldw() above resumes here via its cda_ldw_exc fixup label. */
266handle_load_error:
267	__asm__ __volatile__ ("cda_ldw_exc:\n");
268	return PA_MEMCPY_LOAD_ERROR;
269
/* A faulting stw() above resumes here via its cda_stw_exc fixup label. */
270handle_store_error:
271	__asm__ __volatile__ ("cda_stw_exc:\n");
272	return PA_MEMCPY_STORE_ERROR;
273}
274
275
276/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
277 * In case of an access fault the faulty address can be read from the per_cpu
278 * exception data struct. */
279static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
280					unsigned long len)
281{
282	register unsigned long src, dst, t1, t2, t3;
283	register unsigned char *pcs, *pcd;
284	register unsigned int *pws, *pwd;
285	register double *pds, *pdd;
286	unsigned long ret;
287
288	src = (unsigned long)srcp;
289	dst = (unsigned long)dstp;
290	pcs = (unsigned char *)srcp;
291	pcd = (unsigned char *)dstp;
292
293	/* prefetch_src((const void *)srcp); */
294
	/* Short copies are not worth the alignment analysis below. */
295	if (len < THRESHOLD)
296		goto byte_copy;
297
298	/* Check alignment */
299	t1 = (src ^ dst);
300	if (unlikely(t1 & (sizeof(double)-1)))
301		goto unaligned_copy;
302
303	/* src and dst have same alignment. */
304
305	/* Copy bytes till we are double-aligned. */
306	t2 = src & (sizeof(double) - 1);
307	if (unlikely(t2 != 0)) {
308		t2 = sizeof(double) - t2;
309		while (t2 && len) {
310			/* *pcd++ = *pcs++; */
311			ldbma(s_space, pcs, t3, pmc_load_exc);
312			len--;
313			stbma(d_space, t3, pcd, pmc_store_exc);
314			t2--;
315		}
316	}
317
318	pds = (double *)pcs;
319	pdd = (double *)pcd;
320
	/* Disabled floating-point double-word copy path (kept for reference). */
321#if 0
322	/* Copy 8 doubles at a time */
323	while (len >= 8*sizeof(double)) {
324		register double r1, r2, r3, r4, r5, r6, r7, r8;
325		/* prefetch_src((char *)pds + L1_CACHE_BYTES); */
326		flddma(s_space, pds, r1, pmc_load_exc);
327		flddma(s_space, pds, r2, pmc_load_exc);
328		flddma(s_space, pds, r3, pmc_load_exc);
329		flddma(s_space, pds, r4, pmc_load_exc);
330		fstdma(d_space, r1, pdd, pmc_store_exc);
331		fstdma(d_space, r2, pdd, pmc_store_exc);
332		fstdma(d_space, r3, pdd, pmc_store_exc);
333		fstdma(d_space, r4, pdd, pmc_store_exc);
334
335#if 0
336		if (L1_CACHE_BYTES <= 32)
337			prefetch_src((char *)pds + L1_CACHE_BYTES);
338#endif
339		flddma(s_space, pds, r5, pmc_load_exc);
340		flddma(s_space, pds, r6, pmc_load_exc);
341		flddma(s_space, pds, r7, pmc_load_exc);
342		flddma(s_space, pds, r8, pmc_load_exc);
343		fstdma(d_space, r5, pdd, pmc_store_exc);
344		fstdma(d_space, r6, pdd, pmc_store_exc);
345		fstdma(d_space, r7, pdd, pmc_store_exc);
346		fstdma(d_space, r8, pdd, pmc_store_exc);
347		len -= 8*sizeof(double);
348	}
349#endif
350
351	pws = (unsigned int *)pds;
352	pwd = (unsigned int *)pdd;
353
	/* Word-at-a-time copy: 8 words per iteration while len allows,
	 * then up to one 4-word pass, then fall through to the byte loop. */
354word_copy:
355	while (len >= 8*sizeof(unsigned int)) {
356		register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
357		/* prefetch_src((char *)pws + L1_CACHE_BYTES); */
358		ldwma(s_space, pws, r1, pmc_load_exc);
359		ldwma(s_space, pws, r2, pmc_load_exc);
360		ldwma(s_space, pws, r3, pmc_load_exc);
361		ldwma(s_space, pws, r4, pmc_load_exc);
362		stwma(d_space, r1, pwd, pmc_store_exc);
363		stwma(d_space, r2, pwd, pmc_store_exc);
364		stwma(d_space, r3, pwd, pmc_store_exc);
365		stwma(d_space, r4, pwd, pmc_store_exc);
366
367		ldwma(s_space, pws, r5, pmc_load_exc);
368		ldwma(s_space, pws, r6, pmc_load_exc);
369		ldwma(s_space, pws, r7, pmc_load_exc);
370		ldwma(s_space, pws, r8, pmc_load_exc);
371		stwma(d_space, r5, pwd, pmc_store_exc);
372		stwma(d_space, r6, pwd, pmc_store_exc);
373		stwma(d_space, r7, pwd, pmc_store_exc);
374		stwma(d_space, r8, pwd, pmc_store_exc);
375		len -= 8*sizeof(unsigned int);
376	}
377
378	while (len >= 4*sizeof(unsigned int)) {
379		register unsigned int r1,r2,r3,r4;
380		ldwma(s_space, pws, r1, pmc_load_exc);
381		ldwma(s_space, pws, r2, pmc_load_exc);
382		ldwma(s_space, pws, r3, pmc_load_exc);
383		ldwma(s_space, pws, r4, pmc_load_exc);
384		stwma(d_space, r1, pwd, pmc_store_exc);
385		stwma(d_space, r2, pwd, pmc_store_exc);
386		stwma(d_space, r3, pwd, pmc_store_exc);
387		stwma(d_space, r4, pwd, pmc_store_exc);
388		len -= 4*sizeof(unsigned int);
389	}
390
391	pcs = (unsigned char *)pws;
392	pcd = (unsigned char *)pwd;
393
	/* Bytewise loop for short copies and for any trailing remainder. */
394byte_copy:
395	while (len) {
396		/* *pcd++ = *pcs++; */
397		ldbma(s_space, pcs, t3, pmc_load_exc);
398		stbma(d_space, t3, pcd, pmc_store_exc);
399		len--;
400	}
401
402	return PA_MEMCPY_OK;
403
404unaligned_copy:
405	/* possibly we are aligned on a word, but not on a double... */
406	if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
407		t2 = src & (sizeof(unsigned int) - 1);
408
409		if (unlikely(t2 != 0)) {
410			t2 = sizeof(unsigned int) - t2;
411			while (t2) {
412				/* *pcd++ = *pcs++; */
413				ldbma(s_space, pcs, t3, pmc_load_exc);
414				stbma(d_space, t3, pcd, pmc_store_exc);
415				len--;
416				t2--;
417			}
418		}
419
420		pws = (unsigned int *)pcs;
421		pwd = (unsigned int *)pcd;
422		goto word_copy;
423	}
424
425	/* Align the destination. */
426	if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
427		t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
428		while (t2) {
429			/* *pcd++ = *pcs++; */
430			ldbma(s_space, pcs, t3, pmc_load_exc);
431			stbma(d_space, t3, pcd, pmc_store_exc);
432			len--;
433			t2--;
434		}
435		dst = (unsigned long)pcd;
436		src = (unsigned long)pcs;
437	}
438
	/* dst is word-aligned but src is not: use the shift-and-merge copy.
	 * Note that copy_dstaligned() takes its length in WORDS, not bytes. */
439	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
440	if (ret)
441		return ret;
442
	/* Advance byte pointers past what copy_dstaligned() transferred and
	 * finish the sub-word remainder with the byte loop. */
443	pcs += (len & -sizeof(unsigned int));
444	pcd += (len & -sizeof(unsigned int));
445	len %= sizeof(unsigned int);
446
	/* preserve_branch() presumably keeps the handlers below live; they
	 * are reached only through the inline-asm exception labels.
	 * NOTE(review): confirm against the macro definition in this file. */
447	preserve_branch(handle_load_error);
448	preserve_branch(handle_store_error);
449
450	goto byte_copy;
451
/* A faulting load (ldbma/ldwma/flddma) resumes here via pmc_load_exc. */
452handle_load_error:
453	__asm__ __volatile__ ("pmc_load_exc:\n");
454	return PA_MEMCPY_LOAD_ERROR;
455
/* A faulting store (stbma/stwma/fstdma) resumes here via pmc_store_exc. */
456handle_store_error:
457	__asm__ __volatile__ ("pmc_store_exc:\n");
458	return PA_MEMCPY_STORE_ERROR;
459}
460
461
462/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 35/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
463static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 36extern unsigned long pa_memcpy(void *dst, const void *src,
464{ 37 unsigned long len);
	/* Wrapper: returns 0 on success, otherwise the number of bytes NOT
	 * copied, computed from the faulting address that the exception
	 * fixup code records in the per-cpu exception_data struct. */
465	unsigned long ret, fault_addr, reference;
466	struct exception_data *d;
467
468	ret = pa_memcpy_internal(dstp, srcp, len);
469	if (likely(ret == PA_MEMCPY_OK))
470		return 0;
471
472	/* if a load or store fault occured we can get the faulty addr */
473	d = this_cpu_ptr(&exception_data);
474	fault_addr = d->fault_addr;
475
	/* Pick the base of the region that faulted: the source buffer for a
	 * load error, the destination buffer otherwise. */
476	/* error in load or store? */
477	if (ret == PA_MEMCPY_LOAD_ERROR)
478		reference = (unsigned long) srcp;
479	else
480		reference = (unsigned long) dstp;
481 38
482	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
483			ret, len, fault_addr, reference);
484
	/* A fault address below the buffer start cannot be attributed to the
	 * copy; conservatively report that nothing was transferred. */
485	if (fault_addr >= reference)
486		return len - (fault_addr - reference);
487	else
488		return len;
489}
490
491#ifdef __KERNEL__
492unsigned long __copy_to_user(void __user *dst, const void *src, 39unsigned long __copy_to_user(void __user *dst, const void *src,
493 unsigned long len) 40 unsigned long len)
494{ 41{
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
537 84
538 return __probe_kernel_read(dst, src, size); 85 return __probe_kernel_read(dst, src, size);
539} 86}
540
541#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index deab89a8915a..32ec22146141 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
150 d->fault_space = regs->isr; 150 d->fault_space = regs->isr;
151 d->fault_addr = regs->ior; 151 d->fault_addr = regs->ior;
152 152
153 /*
154 * Fix up get_user() and put_user().
155 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
156 * bit in the relative address of the fixup routine to indicate
157 * that %r8 should be loaded with -EFAULT to report a userspace
158 * access error.
159 */
160 if (fix->fixup & 1) {
161 regs->gr[8] = -EFAULT;
162
163 /* zero target register for get_user() */
164 if (parisc_acctyp(0, regs->iir) == VM_READ) {
165 int treg = regs->iir & 0x1f;
166 regs->gr[treg] = 0;
167 }
168 }
169
153 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; 170 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
154 regs->iaoq[0] &= ~3; 171 regs->iaoq[0] &= ~3;
155 /* 172 /*
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
33 } 33 }
34 34
35 if (len & ~VMX_ALIGN_MASK) { 35 if (len & ~VMX_ALIGN_MASK) {
36 preempt_disable();
36 pagefault_disable(); 37 pagefault_disable();
37 enable_kernel_altivec(); 38 enable_kernel_altivec();
38 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); 39 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
40 disable_kernel_altivec();
39 pagefault_enable(); 41 pagefault_enable();
42 preempt_enable();
40 } 43 }
41 44
42 tail = len & VMX_ALIGN_MASK; 45 tail = len & VMX_ALIGN_MASK;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 14752eee3d0c..ed3beadd2cc5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
236 mtctr reg; \ 236 mtctr reg; \
237 bctr 237 bctr
238 238
239#define BRANCH_LINK_TO_FAR(reg, label) \ 239#define BRANCH_LINK_TO_FAR(label) \
240 __LOAD_FAR_HANDLER(reg, label); \ 240 __LOAD_FAR_HANDLER(r12, label); \
241 mtctr reg; \ 241 mtctr r12; \
242 bctrl 242 bctrl
243 243
244/* 244/*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
265#define BRANCH_TO_COMMON(reg, label) \ 265#define BRANCH_TO_COMMON(reg, label) \
266 b label 266 b label
267 267
268#define BRANCH_LINK_TO_FAR(reg, label) \ 268#define BRANCH_LINK_TO_FAR(label) \
269 bl label 269 bl label
270 270
271#define BRANCH_TO_KVM(reg, label) \ 271#define BRANCH_TO_KVM(reg, label) \
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
807 nb = aligninfo[instr].len; 807 nb = aligninfo[instr].len;
808 flags = aligninfo[instr].flags; 808 flags = aligninfo[instr].flags;
809 809
810 /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ 810 /*
811 if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { 811 * Handle some cases which give overlaps in the DSISR values.
812 nb = 8; 812 */
813 flags = LD+SW; 813 if (IS_XFORM(instruction)) {
814 } else if (IS_XFORM(instruction) && 814 switch (get_xop(instruction)) {
815 ((instruction >> 1) & 0x3ff) == 660) { 815 case 532: /* ldbrx */
816 nb = 8; 816 nb = 8;
817 flags = ST+SW; 817 flags = LD+SW;
818 break;
819 case 660: /* stdbrx */
820 nb = 8;
821 flags = ST+SW;
822 break;
823 case 20: /* lwarx */
824 case 84: /* ldarx */
825 case 116: /* lharx */
826 case 276: /* lqarx */
827 return 0; /* not emulated ever */
828 }
818 } 829 }
819 830
820 /* Byteswap little endian loads and stores */ 831 /* Byteswap little endian loads and stores */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4bf08c8..767ef6d68c9e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@ resume_kernel:
689 689
690 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ 690 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
691 691
692 lwz r3,GPR1(r1) 692 ld r3,GPR1(r1)
693 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ 693 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
694 mr r4,r1 /* src: current exception frame */ 694 mr r4,r1 /* src: current exception frame */
695 mr r1,r3 /* Reroute the trampoline frame to r1 */ 695 mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
703 addi r6,r6,8 703 addi r6,r6,8
704 bdnz 2b 704 bdnz 2b
705 705
706 /* Do real store operation to complete stwu */ 706 /* Do real store operation to complete stdu */
707 lwz r5,GPR1(r1) 707 ld r5,GPR1(r1)
708 std r8,0(r5) 708 std r8,0(r5)
709 709
710 /* Clear _TIF_EMULATE_STACK_STORE flag */ 710 /* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 857bf7c5b946..6353019966e6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
982 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) 982 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
983 EXCEPTION_PROLOG_COMMON_3(0xe60) 983 EXCEPTION_PROLOG_COMMON_3(0xe60)
984 addi r3,r1,STACK_FRAME_OVERHEAD 984 addi r3,r1,STACK_FRAME_OVERHEAD
985 BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode) 985 BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
986 /* Windup the stack. */ 986 /* Windup the stack. */
987 /* Move original HSRR0 and HSRR1 into the respective regs */ 987 /* Move original HSRR0 and HSRR1 into the respective regs */
988 ld r9,_MSR(r1) 988 ld r9,_MSR(r1)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
449_GLOBAL(pnv_wakeup_tb_loss) 449_GLOBAL(pnv_wakeup_tb_loss)
450 ld r1,PACAR1(r13) 450 ld r1,PACAR1(r13)
451 /* 451 /*
452 * Before entering any idle state, the NVGPRs are saved in the stack 452 * Before entering any idle state, the NVGPRs are saved in the stack.
453 * and they are restored before switching to the process context. Hence 453 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
454 * until they are restored, they are free to be used. 454 * NVGPRs are restored. If we are here, it is likely that state is lost,
455 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
456 * here are the same as the test to restore NVGPRS:
457 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
458 * and SRR1 test for restoring NVGPRs.
459 *
460 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
461 * guarantee they will always be restored. This might be tightened
462 * with careful reading of specs (particularly for ISA300) but this
463 * is already a slow wakeup path and it's simpler to be safe.
464 */
465 li r0,1
466 stb r0,PACA_NAPSTATELOST(r13)
467
468 /*
455 * 469 *
456 * Save SRR1 and LR in NVGPRs as they might be clobbered in 470 * Save SRR1 and LR in NVGPRs as they might be clobbered in
457 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required 471 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
67 * flush all bytes from start through stop-1 inclusive 67 * flush all bytes from start through stop-1 inclusive
68 */ 68 */
69 69
70_GLOBAL(flush_icache_range) 70_GLOBAL_TOC(flush_icache_range)
71BEGIN_FTR_SECTION 71BEGIN_FTR_SECTION
72 PURGE_PREFETCHED_INS 72 PURGE_PREFETCHED_INS
73 blr 73 blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
120 * 120 *
121 * flush all bytes from start to stop-1 inclusive 121 * flush all bytes from start to stop-1 inclusive
122 */ 122 */
123_GLOBAL(flush_dcache_range) 123_GLOBAL_TOC(flush_dcache_range)
124 124
125/* 125/*
126 * Flush the data cache to memory 126 * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
237 } 237 }
238 238
239 /*
240 * Fixup HFSCR:TM based on CPU features. The bit is set by our
241 * early asm init because at that point we haven't updated our
242 * CPU features from firmware and device-tree. Here we have,
243 * so let's do it.
244 */
245 if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
246 mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
247
239 /* Set IR and DR in PACA MSR */ 248 /* Set IR and DR in PACA MSR */
240 get_paca()->kernel_msr = MSR_KERNEL; 249 get_paca()->kernel_msr = MSR_KERNEL;
241} 250}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1487 /* start new resize */ 1487 /* start new resize */
1488 1488
1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL); 1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1490 if (!resize) {
1491 ret = -ENOMEM;
1492 goto out;
1493 }
1490 resize->order = shift; 1494 resize->order = shift;
1491 resize->kvm = kvm; 1495 resize->kvm = kvm;
1492 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1496 INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
638 unsigned long psize = batch->psize; 638 unsigned long psize = batch->psize;
639 int ssize = batch->ssize; 639 int ssize = batch->ssize;
640 int i; 640 int i;
641 unsigned int use_local;
642
643 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
644 mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
641 645
642 local_irq_save(flags); 646 local_irq_save(flags);
643 647
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
667 } pte_iterate_hashed_end(); 671 } pte_iterate_hashed_end();
668 } 672 }
669 673
670 if (mmu_has_feature(MMU_FTR_TLBIEL) && 674 if (use_local) {
671 mmu_psize_defs[psize].tlbiel && local) {
672 asm volatile("ptesync":::"memory"); 675 asm volatile("ptesync":::"memory");
673 for (i = 0; i < number; i++) { 676 for (i = 0; i < number; i++) {
674 vpn = batch->vpn[i]; 677 vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be992083d2a..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
397void __init mmu_early_init_devtree(void) 397void __init mmu_early_init_devtree(void)
398{ 398{
399 /* Disable radix mode based on kernel command line. */ 399 /* Disable radix mode based on kernel command line. */
400 /* We don't yet have the machinery to do radix as a guest. */ 400 if (disable_radix)
401 if (disable_radix || !(mfmsr() & MSR_HV))
402 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 401 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
403 402
404 /* 403 /*
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index fa95041fa9f6..33ca29333e18 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
141 141
142unsigned long decompress_kernel(void) 142unsigned long decompress_kernel(void)
143{ 143{
144 unsigned long output_addr; 144 void *output, *kernel_end;
145 unsigned char *output;
146 145
147 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; 146 output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
148 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); 147 kernel_end = output + SZ__bss_start;
149 memset(&_bss, 0, &_ebss - &_bss); 148 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
150 free_mem_ptr = (unsigned long)&_end;
151 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
152 output = (unsigned char *) output_addr;
153 149
154#ifdef CONFIG_BLK_DEV_INITRD 150#ifdef CONFIG_BLK_DEV_INITRD
155 /* 151 /*
156 * Move the initrd right behind the end of the decompressed 152 * Move the initrd right behind the end of the decompressed
157 * kernel image. 153 * kernel image. This also prevents initrd corruption caused by
154 * bss clearing since kernel_end will always be located behind the
155 * current bss section..
158 */ 156 */
159 if (INITRD_START && INITRD_SIZE && 157 if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
160 INITRD_START < (unsigned long) output + SZ__bss_start) { 158 check_ipl_parmblock(kernel_end, INITRD_SIZE);
161 check_ipl_parmblock(output + SZ__bss_start, 159 memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
162 INITRD_START + INITRD_SIZE); 160 INITRD_START = (unsigned long) kernel_end;
163 memmove(output + SZ__bss_start,
164 (void *) INITRD_START, INITRD_SIZE);
165 INITRD_START = (unsigned long) output + SZ__bss_start;
166 } 161 }
167#endif 162#endif
168 163
164 /*
165 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
166 * initialized afterwards since they reside in bss.
167 */
168 memset(&_bss, 0, &_ebss - &_bss);
169 free_mem_ptr = (unsigned long) &_end;
170 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
171
169 puts("Uncompressing Linux... "); 172 puts("Uncompressing Linux... ");
170 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
171 puts("Ok, booting the kernel.\n"); 174 puts("Ok, booting the kernel.\n");
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 93e37b12e882..ecec682bb516 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1051{ 1051{
1052 if (!MACHINE_HAS_NX) 1052 if (!MACHINE_HAS_NX)
1053 pte_val(entry) &= ~_PAGE_NOEXEC; 1053 pte_val(entry) &= ~_PAGE_NOEXEC;
1054 if (pte_present(entry))
1055 pte_val(entry) &= ~_PAGE_UNUSED;
1054 if (mm_has_pgste(mm)) 1056 if (mm_has_pgste(mm))
1055 ptep_set_pte_at(mm, addr, ptep, entry); 1057 ptep_set_pte_at(mm, addr, ptep, entry);
1056 else 1058 else
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 5ce29fe100ba..fbd9116eb17b 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,5 @@
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[], _ehead[]; 6extern char _eshared[], _ehead[];
7extern char __start_ro_after_init[], __end_ro_after_init[];
8 7
9#endif 8#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 136932ff4250..3ea1554d04b3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
147 " jg 2b\n" \ 147 " jg 2b\n" \
148 ".popsection\n" \ 148 ".popsection\n" \
149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
150 : "=d" (__rc), "=Q" (*(to)) \ 150 : "=d" (__rc), "+Q" (*(to)) \
151 : "d" (size), "Q" (*(from)), \ 151 : "d" (size), "Q" (*(from)), \
152 "d" (__reg0), "K" (-EFAULT) \ 152 "d" (__reg0), "K" (-EFAULT) \
153 : "cc"); \ 153 : "cc"); \
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..5dab859b0d54 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
909{ 909{
910 struct pcpu *pcpu = pcpu_devices; 910 struct pcpu *pcpu = pcpu_devices;
911 911
912 WARN_ON(!cpu_present(0) || !cpu_online(0));
912 pcpu->state = CPU_STATE_CONFIGURED; 913 pcpu->state = CPU_STATE_CONFIGURED;
913 pcpu->address = stap();
914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); 914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
915 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 915 S390_lowcore.percpu_offset = __per_cpu_offset[0];
916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
917 set_cpu_present(0, true);
918 set_cpu_online(0, true);
919} 917}
920 918
921void __init smp_cpus_done(unsigned int max_cpus) 919void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
924 922
925void __init smp_setup_processor_id(void) 923void __init smp_setup_processor_id(void)
926{ 924{
925 pcpu_devices[0].address = stap();
927 S390_lowcore.cpu_nr = 0; 926 S390_lowcore.cpu_nr = 0;
928 S390_lowcore.spinlock_lockval = arch_spin_lockval(0); 927 S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
929} 928}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 5ccf95396251..72307f108c40 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -63,11 +63,9 @@ SECTIONS
63 63
64 . = ALIGN(PAGE_SIZE); 64 . = ALIGN(PAGE_SIZE);
65 __start_ro_after_init = .; 65 __start_ro_after_init = .;
66 __start_data_ro_after_init = .;
67 .data..ro_after_init : { 66 .data..ro_after_init : {
68 *(.data..ro_after_init) 67 *(.data..ro_after_init)
69 } 68 }
70 __end_data_ro_after_init = .;
71 EXCEPTION_TABLE(16) 69 EXCEPTION_TABLE(16)
72 . = ALIGN(PAGE_SIZE); 70 . = ALIGN(PAGE_SIZE);
73 __end_ro_after_init = .; 71 __end_ro_after_init = .;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..ddbffb715b40 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -168,8 +168,7 @@ union page_table_entry {
168 unsigned long z : 1; /* Zero Bit */ 168 unsigned long z : 1; /* Zero Bit */
169 unsigned long i : 1; /* Page-Invalid Bit */ 169 unsigned long i : 1; /* Page-Invalid Bit */
170 unsigned long p : 1; /* DAT-Protection Bit */ 170 unsigned long p : 1; /* DAT-Protection Bit */
171 unsigned long co : 1; /* Change-Recording Override */ 171 unsigned long : 9;
172 unsigned long : 8;
173 }; 172 };
174}; 173};
175 174
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
745 return PGM_PAGE_TRANSLATION; 744 return PGM_PAGE_TRANSLATION;
746 if (pte.z) 745 if (pte.z)
747 return PGM_TRANSLATION_SPEC; 746 return PGM_TRANSLATION_SPEC;
748 if (pte.co && !edat1)
749 return PGM_TRANSLATION_SPEC;
750 dat_protection |= pte.p; 747 dat_protection |= pte.p;
751 raddr.pfra = pte.pfra; 748 raddr.pfra = pte.pfra;
752real_address: 749real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1182 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); 1179 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
1183 if (!rc && pte.i) 1180 if (!rc && pte.i)
1184 rc = PGM_PAGE_TRANSLATION; 1181 rc = PGM_PAGE_TRANSLATION;
1185 if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) 1182 if (!rc && pte.z)
1186 rc = PGM_TRANSLATION_SPEC; 1183 rc = PGM_TRANSLATION_SPEC;
1187shadow_page: 1184shadow_page:
1188 pte.p |= dat_protection; 1185 pte.p |= dat_protection;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 68ac5c7cd982..a59deaef21e5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,7 +43,7 @@ config SPARC
43 select ARCH_HAS_SG_CHAIN 43 select ARCH_HAS_SG_CHAIN
44 select CPU_NO_EFFICIENT_FFS 44 select CPU_NO_EFFICIENT_FFS
45 select HAVE_ARCH_HARDENED_USERCOPY 45 select HAVE_ARCH_HARDENED_USERCOPY
46 select PROVE_LOCKING_SMALL if PROVE_LOCKING 46 select LOCKDEP_SMALL if LOCKDEP
47 select ARCH_WANT_RELAX_ORDER 47 select ARCH_WANT_RELAX_ORDER
48 48
49config SPARC32 49config SPARC32
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f294dd42fc7d..5961b2d8398a 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,6 +17,7 @@
17 17
18#define HPAGE_SHIFT 23 18#define HPAGE_SHIFT 23
19#define REAL_HPAGE_SHIFT 22 19#define REAL_HPAGE_SHIFT 22
20#define HPAGE_2GB_SHIFT 31
20#define HPAGE_256MB_SHIFT 28 21#define HPAGE_256MB_SHIFT 28
21#define HPAGE_64K_SHIFT 16 22#define HPAGE_64K_SHIFT 16
22#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) 23#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
27#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 28#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
28#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 29#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) 30#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
30#define HUGE_MAX_HSTATE 3 31#define HUGE_MAX_HSTATE 4
31#endif 32#endif
32 33
33#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8a598528ec1f..6fbd931f0570 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
679 return pte_pfn(pte); 679 return pte_pfn(pte);
680} 680}
681 681
682#ifdef CONFIG_TRANSPARENT_HUGEPAGE 682#define __HAVE_ARCH_PMD_WRITE
683static inline unsigned long pmd_dirty(pmd_t pmd) 683static inline unsigned long pmd_write(pmd_t pmd)
684{ 684{
685 pte_t pte = __pte(pmd_val(pmd)); 685 pte_t pte = __pte(pmd_val(pmd));
686 686
687 return pte_dirty(pte); 687 return pte_write(pte);
688} 688}
689 689
690static inline unsigned long pmd_young(pmd_t pmd) 690#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691static inline unsigned long pmd_dirty(pmd_t pmd)
691{ 692{
692 pte_t pte = __pte(pmd_val(pmd)); 693 pte_t pte = __pte(pmd_val(pmd));
693 694
694 return pte_young(pte); 695 return pte_dirty(pte);
695} 696}
696 697
697static inline unsigned long pmd_write(pmd_t pmd) 698static inline unsigned long pmd_young(pmd_t pmd)
698{ 699{
699 pte_t pte = __pte(pmd_val(pmd)); 700 pte_t pte = __pte(pmd_val(pmd));
700 701
701 return pte_write(pte); 702 return pte_young(pte);
702} 703}
703 704
704static inline unsigned long pmd_trans_huge(pmd_t pmd) 705static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 365d4cb267b4..dd27159819eb 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -18,12 +18,6 @@
18#include <asm/signal.h> 18#include <asm/signal.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/*
22 * The sparc has no problems with write protection
23 */
24#define wp_works_ok 1
25#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
26
27/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... 21/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
28 * That one page is used to protect kernel from intruders, so that 22 * That one page is used to protect kernel from intruders, so that
29 * we can make our access_ok test faster 23 * we can make our access_ok test faster
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6448cfc8292f..b58ee9018433 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,10 +18,6 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/* 21/*
26 * User lives in his very own context, and cannot reference us. Note 22 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 23 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da152c20..44101196d02b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -96,6 +96,7 @@ sparc64_boot:
96 andn %g1, PSTATE_AM, %g1 96 andn %g1, PSTATE_AM, %g1
97 wrpr %g1, 0x0, %pstate 97 wrpr %g1, 0x0, %pstate
98 ba,a,pt %xcc, 1f 98 ba,a,pt %xcc, 1f
99 nop
99 100
100 .globl prom_finddev_name, prom_chosen_path, prom_root_node 101 .globl prom_finddev_name, prom_chosen_path, prom_root_node
101 .globl prom_getprop_name, prom_mmu_name, prom_peer_name 102 .globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
613 nop 614 nop
614 615
615 ba,a,pt %xcc, 80f 616 ba,a,pt %xcc, 80f
617 nop
616niagara4_patch: 618niagara4_patch:
617 call niagara4_patch_copyops 619 call niagara4_patch_copyops
618 nop 620 nop
@@ -622,6 +624,7 @@ niagara4_patch:
622 nop 624 nop
623 625
624 ba,a,pt %xcc, 80f 626 ba,a,pt %xcc, 80f
627 nop
625 628
626niagara2_patch: 629niagara2_patch:
627 call niagara2_patch_copyops 630 call niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
632 nop 635 nop
633 636
634 ba,a,pt %xcc, 80f 637 ba,a,pt %xcc, 80f
638 nop
635 639
636niagara_patch: 640niagara_patch:
637 call niagara_patch_copyops 641 call niagara_patch_copyops
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 34b4933900bf..9276d2f0dd86 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -82,6 +82,7 @@ do_stdfmna:
82 call handle_stdfmna 82 call handle_stdfmna
83 add %sp, PTREGS_OFF, %o0 83 add %sp, PTREGS_OFF, %o0
84 ba,a,pt %xcc, rtrap 84 ba,a,pt %xcc, rtrap
85 nop
85 .size do_stdfmna,.-do_stdfmna 86 .size do_stdfmna,.-do_stdfmna
86 87
87 .type breakpoint_trap,#function 88 .type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index df9e731a76f5..fc5124ccdb53 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -351,7 +351,7 @@ static int genregs64_set(struct task_struct *target,
351 } 351 }
352 352
353 if (!ret) { 353 if (!ret) {
354 unsigned long y; 354 unsigned long y = regs->y;
355 355
356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
357 &y, 357 &y,
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 216948ca4382..709a82ebd294 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
237 bne,pt %xcc, user_rtt_fill_32bit 237 bne,pt %xcc, user_rtt_fill_32bit
238 wrpr %g1, %cwp 238 wrpr %g1, %cwp
239 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
240 nop
240 241
241user_rtt_fill_fixup_dax: 242user_rtt_fill_fixup_dax:
242 ba,pt %xcc, user_rtt_fill_fixup_common 243 ba,pt %xcc, user_rtt_fill_fixup_common
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index 4a73009f66a5..d7e540842809 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
86 rd %pc, %g7 86 rd %pc, %g7
87 87
88 ba,a,pt %xcc, 2f 88 ba,a,pt %xcc, 2f
89 nop
89 90
901: ba,pt %xcc, etrap_irq 911: ba,pt %xcc, etrap_irq
91 rd %pc, %g7 92 rd %pc, %g7
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 6179e19bc9b9..c19f352f46c7 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -352,6 +352,7 @@ sun4v_mna:
352 call sun4v_do_mna 352 call sun4v_do_mna
353 add %sp, PTREGS_OFF, %o0 353 add %sp, PTREGS_OFF, %o0
354 ba,a,pt %xcc, rtrap 354 ba,a,pt %xcc, rtrap
355 nop
355 356
356 /* Privileged Action. */ 357 /* Privileged Action. */
357sun4v_privact: 358sun4v_privact:
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 5604a2b051d4..364af3250646 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
92 call sun4v_data_access_exception 92 call sun4v_data_access_exception
93 nop 93 nop
94 ba,a,pt %xcc, rtrap 94 ba,a,pt %xcc, rtrap
95 nop
95 96
961: call spitfire_data_access_exception 971: call spitfire_data_access_exception
97 nop 98 nop
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 855019a8590e..1ee173cc3c39 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -152,6 +152,8 @@ fill_fixup_dax:
152 call sun4v_data_access_exception 152 call sun4v_data_access_exception
153 nop 153 nop
154 ba,a,pt %xcc, rtrap 154 ba,a,pt %xcc, rtrap
155 nop
1551: call spitfire_data_access_exception 1561: call spitfire_data_access_exception
156 nop 157 nop
157 ba,a,pt %xcc, rtrap 158 ba,a,pt %xcc, rtrap
159 nop
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index c629dbd121b6..64dcd6cdb606 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
326 blu 170f 326 blu 170f
327 nop 327 nop
328 ba,a,pt %xcc, 180f 328 ba,a,pt %xcc, 180f
329 nop
329 330
3304: /* 32 <= low bits < 48 */ 3314: /* 32 <= low bits < 48 */
331 blu 150f 332 blu 150f
332 nop 333 nop
333 ba,a,pt %xcc, 160f 334 ba,a,pt %xcc, 160f
335 nop
3345: /* 0 < low bits < 32 */ 3365: /* 0 < low bits < 32 */
335 blu,a 6f 337 blu,a 6f
336 cmp %g2, 8 338 cmp %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
338 blu 130f 340 blu 130f
339 nop 341 nop
340 ba,a,pt %xcc, 140f 342 ba,a,pt %xcc, 140f
343 nop
3416: /* 0 < low bits < 16 */ 3446: /* 0 < low bits < 16 */
342 bgeu 120f 345 bgeu 120f
343 nop 346 nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
475 brz,pt %o2, 85f 478 brz,pt %o2, 85f
476 sub %o0, %o1, GLOBAL_SPARE 479 sub %o0, %o1, GLOBAL_SPARE
477 ba,a,pt %XCC, 90f 480 ba,a,pt %XCC, 90f
481 nop
478 482
479 .align 64 483 .align 64
48075: /* 16 < len <= 64 */ 48475: /* 16 < len <= 64 */
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 75bb93b1437f..78ea962edcbe 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
530 bne,pt %icc, 1b 530 bne,pt %icc, 1b
531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) 531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
532 ba,a,pt %icc, .Lexit 532 ba,a,pt %icc, .Lexit
533 nop
533 .size FUNC_NAME, .-FUNC_NAME 534 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S
index 41da4bdd95cb..7c0c81f18837 100644
--- a/arch/sparc/lib/NG4memset.S
+++ b/arch/sparc/lib/NG4memset.S
@@ -102,4 +102,5 @@ NG4bzero:
102 bne,pt %icc, 1b 102 bne,pt %icc, 1b
103 add %o0, 0x30, %o0 103 add %o0, 0x30, %o0
104 ba,a,pt %icc, .Lpostloop 104 ba,a,pt %icc, .Lpostloop
105 nop
105 .size NG4bzero,.-NG4bzero 106 .size NG4bzero,.-NG4bzero
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index d88c4ed50a00..cd654a719b27 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
394 brz,pt %i2, 85f 394 brz,pt %i2, 85f
395 sub %o0, %i1, %i3 395 sub %o0, %i1, %i3
396 ba,a,pt %XCC, 90f 396 ba,a,pt %XCC, 90f
397 nop
397 398
398 .align 64 399 .align 64
39970: /* 16 < len <= 64 */ 40070: /* 16 < len <= 64 */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 323bc6b6e3ad..7c29d38e6b99 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
143 pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; 143 pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
144 144
145 switch (shift) { 145 switch (shift) {
146 case HPAGE_2GB_SHIFT:
147 hugepage_size = _PAGE_SZ2GB_4V;
148 pte_val(entry) |= _PAGE_PMD_HUGE;
149 break;
146 case HPAGE_256MB_SHIFT: 150 case HPAGE_256MB_SHIFT:
147 hugepage_size = _PAGE_SZ256MB_4V; 151 hugepage_size = _PAGE_SZ256MB_4V;
148 pte_val(entry) |= _PAGE_PMD_HUGE; 152 pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
183 unsigned int shift; 187 unsigned int shift;
184 188
185 switch (tte_szbits) { 189 switch (tte_szbits) {
190 case _PAGE_SZ2GB_4V:
191 shift = HPAGE_2GB_SHIFT;
192 break;
186 case _PAGE_SZ256MB_4V: 193 case _PAGE_SZ256MB_4V:
187 shift = HPAGE_256MB_SHIFT; 194 shift = HPAGE_256MB_SHIFT;
188 break; 195 break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
261 if (!pmd) 268 if (!pmd)
262 return NULL; 269 return NULL;
263 270
264 if (sz == PMD_SHIFT) 271 if (sz >= PMD_SIZE)
265 pte = (pte_t *)pmd; 272 pte = (pte_t *)pmd;
266 else 273 else
267 pte = pte_alloc_map(mm, pmd, addr); 274 pte = pte_alloc_map(mm, pmd, addr);
@@ -454,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
454 pgd_t *pgd; 461 pgd_t *pgd;
455 unsigned long next; 462 unsigned long next;
456 463
464 addr &= PMD_MASK;
465 if (addr < floor) {
466 addr += PMD_SIZE;
467 if (!addr)
468 return;
469 }
470 if (ceiling) {
471 ceiling &= PMD_MASK;
472 if (!ceiling)
473 return;
474 }
475 if (end - 1 > ceiling - 1)
476 end -= PMD_SIZE;
477 if (addr > end - 1)
478 return;
479
457 pgd = pgd_offset(tlb->mm, addr); 480 pgd = pgd_offset(tlb->mm, addr);
458 do { 481 do {
459 next = pgd_addr_end(addr, end); 482 next = pgd_addr_end(addr, end);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ccd455328989..0cda653ae007 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
337 hugepage_shift = ilog2(hugepage_size); 337 hugepage_shift = ilog2(hugepage_size);
338 338
339 switch (hugepage_shift) { 339 switch (hugepage_shift) {
340 case HPAGE_2GB_SHIFT:
341 hv_pgsz_mask = HV_PGSZ_MASK_2GB;
342 hv_pgsz_idx = HV_PGSZ_IDX_2GB;
343 break;
340 case HPAGE_256MB_SHIFT: 344 case HPAGE_256MB_SHIFT:
341 hv_pgsz_mask = HV_PGSZ_MASK_256MB; 345 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
342 hv_pgsz_idx = HV_PGSZ_IDX_256MB; 346 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
1563 if ((long)addr < 0L) { 1567 if ((long)addr < 0L) {
1564 unsigned long pa = __pa(addr); 1568 unsigned long pa = __pa(addr);
1565 1569
1566 if ((addr >> max_phys_bits) != 0UL) 1570 if ((pa >> max_phys_bits) != 0UL)
1567 return false; 1571 return false;
1568 1572
1569 return pfn_valid(pa >> PAGE_SHIFT); 1573 return pfn_valid(pa >> PAGE_SHIFT);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index def82f6d626f..8e76ebba2986 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
54enum mbus_module srmmu_modtype; 54enum mbus_module srmmu_modtype;
55static unsigned int hwbug_bitmask; 55static unsigned int hwbug_bitmask;
56int vac_cache_size; 56int vac_cache_size;
57EXPORT_SYMBOL(vac_cache_size);
57int vac_line_size; 58int vac_line_size;
58 59
59extern struct resource sparc_iomap; 60extern struct resource sparc_iomap;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index afda3bbf7854..ee8066c3d96c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
154 if (pte_val(*pte) & _PAGE_VALID) { 154 if (pte_val(*pte) & _PAGE_VALID) {
155 bool exec = pte_exec(*pte); 155 bool exec = pte_exec(*pte);
156 156
157 tlb_batch_add_one(mm, vaddr, exec, false); 157 tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
158 } 158 }
159 pte++; 159 pte++;
160 vaddr += PAGE_SIZE; 160 vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
209 pte_t orig_pte = __pte(pmd_val(orig)); 209 pte_t orig_pte = __pte(pmd_val(orig));
210 bool exec = pte_exec(orig_pte); 210 bool exec = pte_exec(orig_pte);
211 211
212 tlb_batch_add_one(mm, addr, exec, true); 212 tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
213 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, 213 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
214 true); 214 REAL_HPAGE_SHIFT);
215 } else { 215 } else {
216 tlb_batch_pmd_scan(mm, addr, orig); 216 tlb_batch_pmd_scan(mm, addr, orig);
217 } 217 }
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0a04811f06b7..bedf08b22a47 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
122 122
123 spin_lock_irqsave(&mm->context.lock, flags); 123 spin_lock_irqsave(&mm->context.lock, flags);
124 124
125 if (tb->hugepage_shift < HPAGE_SHIFT) { 125 if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
126 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; 126 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
127 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; 127 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
128 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 128 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
155 155
156 spin_lock_irqsave(&mm->context.lock, flags); 156 spin_lock_irqsave(&mm->context.lock, flags);
157 157
158 if (hugepage_shift < HPAGE_SHIFT) { 158 if (hugepage_shift < REAL_HPAGE_SHIFT) {
159 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; 159 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
160 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; 160 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
161 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 161 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d449337a360..a94a4d10f2df 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -120,10 +120,6 @@ else
120 # -funit-at-a-time shrinks the kernel .text considerably 120 # -funit-at-a-time shrinks the kernel .text considerably
121 # unfortunately it makes reading oopses harder. 121 # unfortunately it makes reading oopses harder.
122 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) 122 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
123
124 # this works around some issues with generating unwind tables in older gccs
125 # newer gccs do it by default
126 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
127endif 123endif
128 124
129ifdef CONFIG_X86_X32 125ifdef CONFIG_X86_X32
@@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y)
147 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) 143 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
148endif 144endif
149 145
146#
147# If the function graph tracer is used with mcount instead of fentry,
148# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
149# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
150#
151ifdef CONFIG_FUNCTION_GRAPH_TRACER
152 ifndef CONFIG_HAVE_FENTRY
153 ACCUMULATE_OUTGOING_ARGS := 1
154 else
155 ifeq ($(call cc-option-yn, -mfentry), n)
156 ACCUMULATE_OUTGOING_ARGS := 1
157 endif
158 endif
159endif
160
161#
162# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
163# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
164# to test for this bug at compile-time because the test case needs to execute,
165# which is a no-go for cross compilers. So check the GCC version instead.
166#
167ifdef CONFIG_JUMP_LABEL
168 ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
169 ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
170 endif
171endif
172
173ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
174 KBUILD_CFLAGS += -maccumulate-outgoing-args
175endif
176
150# Stackpointer is addressed different for 32 bit and 64 bit x86 177# Stackpointer is addressed different for 32 bit and 64 bit x86
151sp-$(CONFIG_X86_32) := esp 178sp-$(CONFIG_X86_32) := esp
152sp-$(CONFIG_X86_64) := rsp 179sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed49c66c..a45eb15b7cf2 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx)
45# cpu entries 45# cpu entries
46cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) 46cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
47 47
48# Work around the pentium-mmx code generator madness of gcc4.4.x which
49# does stack alignment by generating horrible code _before_ the mcount
50# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
51# tracer assumptions. For i686, generic, core2 this is set by the
52# compiler anyway
53ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
54ADD_ACCUMULATE_OUTGOING_ARGS := y
55endif
56
57# Work around to a bug with asm goto with first implementations of it
58# in gcc causing gcc to mess up the push and pop of the stack in some
59# uses of asm goto.
60ifeq ($(CONFIG_JUMP_LABEL), y)
61ADD_ACCUMULATE_OUTGOING_ARGS := y
62endif
63
64cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
65
66# Bug fix for binutils: this option is required in order to keep 48# Bug fix for binutils: this option is required in order to keep
67# binutils from generating NOPL instructions against our will. 49# binutils from generating NOPL instructions against our will.
68ifneq ($(CONFIG_X86_P6_NOP),y) 50ifneq ($(CONFIG_X86_P6_NOP),y)
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
index 6248740b68b5..31922023de49 100644
--- a/arch/x86/boot/compressed/error.c
+++ b/arch/x86/boot/compressed/error.c
@@ -4,6 +4,7 @@
4 * memcpy() and memmove() are defined for the compressed boot environment. 4 * memcpy() and memmove() are defined for the compressed boot environment.
5 */ 5 */
6#include "misc.h" 6#include "misc.h"
7#include "error.h"
7 8
8void warn(char *m) 9void warn(char *m)
9{ 10{
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53959cd..3f9d1a83891a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
30{ 30{
31 vdso32_enabled = simple_strtoul(s, NULL, 0); 31 vdso32_enabled = simple_strtoul(s, NULL, 0);
32 32
33 if (vdso32_enabled > 1) 33 if (vdso32_enabled > 1) {
34 pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n"); 34 pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
35 vdso32_enabled = 0;
36 }
35 37
36 return 1; 38 return 1;
37} 39}
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
62/* Register vsyscall32 into the ABI table */ 64/* Register vsyscall32 into the ABI table */
63#include <linux/sysctl.h> 65#include <linux/sysctl.h>
64 66
67static const int zero;
68static const int one = 1;
69
65static struct ctl_table abi_table2[] = { 70static struct ctl_table abi_table2[] = {
66 { 71 {
67 .procname = "vsyscall32", 72 .procname = "vsyscall32",
68 .data = &vdso32_enabled, 73 .data = &vdso32_enabled,
69 .maxlen = sizeof(int), 74 .maxlen = sizeof(int),
70 .mode = 0644, 75 .mode = 0644,
71 .proc_handler = proc_dointvec 76 .proc_handler = proc_dointvec_minmax,
77 .extra1 = (int *)&zero,
78 .extra2 = (int *)&one,
72 }, 79 },
73 {} 80 {}
74}; 81};
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2aa1ad194db2..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2256,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2256 struct perf_event_mmap_page *userpg, u64 now) 2256 struct perf_event_mmap_page *userpg, u64 now)
2257{ 2257{
2258 struct cyc2ns_data *data; 2258 struct cyc2ns_data *data;
2259 u64 offset;
2259 2260
2260 userpg->cap_user_time = 0; 2261 userpg->cap_user_time = 0;
2261 userpg->cap_user_time_zero = 0; 2262 userpg->cap_user_time_zero = 0;
@@ -2263,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
2263 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); 2264 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
2264 userpg->pmc_width = x86_pmu.cntval_bits; 2265 userpg->pmc_width = x86_pmu.cntval_bits;
2265 2266
2266 if (!sched_clock_stable()) 2267 if (!using_native_sched_clock() || !sched_clock_stable())
2267 return; 2268 return;
2268 2269
2269 data = cyc2ns_read_begin(); 2270 data = cyc2ns_read_begin();
2270 2271
2272 offset = data->cyc2ns_offset + __sched_clock_offset;
2273
2271 /* 2274 /*
2272 * Internal timekeeping for enabled/running/stopped times 2275 * Internal timekeeping for enabled/running/stopped times
2273 * is always in the local_clock domain. 2276 * is always in the local_clock domain.
@@ -2275,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2275 userpg->cap_user_time = 1; 2278 userpg->cap_user_time = 1;
2276 userpg->time_mult = data->cyc2ns_mul; 2279 userpg->time_mult = data->cyc2ns_mul;
2277 userpg->time_shift = data->cyc2ns_shift; 2280 userpg->time_shift = data->cyc2ns_shift;
2278 userpg->time_offset = data->cyc2ns_offset - now; 2281 userpg->time_offset = offset - now;
2279 2282
2280 /* 2283 /*
2281 * cap_user_time_zero doesn't make sense when we're using a different 2284 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2283,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2283 */ 2286 */
2284 if (!event->attr.use_clockid) { 2287 if (!event->attr.use_clockid) {
2285 userpg->cap_user_time_zero = 1; 2288 userpg->cap_user_time_zero = 1;
2286 userpg->time_zero = data->cyc2ns_offset; 2289 userpg->time_zero = offset;
2287 } 2290 }
2288 2291
2289 cyc2ns_read_end(data); 2292 cyc2ns_read_end(data);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321ace8e0..f924629836a8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
507 cpuc->lbr_entries[i].to = msr_lastbranch.to; 507 cpuc->lbr_entries[i].to = msr_lastbranch.to;
508 cpuc->lbr_entries[i].mispred = 0; 508 cpuc->lbr_entries[i].mispred = 0;
509 cpuc->lbr_entries[i].predicted = 0; 509 cpuc->lbr_entries[i].predicted = 0;
510 cpuc->lbr_entries[i].in_tx = 0;
511 cpuc->lbr_entries[i].abort = 0;
512 cpuc->lbr_entries[i].cycles = 0;
510 cpuc->lbr_entries[i].reserved = 0; 513 cpuc->lbr_entries[i].reserved = 0;
511 } 514 }
512 cpuc->lbr_stack.nr = i; 515 cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9d49c18b5ea9..3762536619f8 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -287,7 +287,7 @@ struct task_struct;
287 287
288#define ARCH_DLINFO_IA32 \ 288#define ARCH_DLINFO_IA32 \
289do { \ 289do { \
290 if (vdso32_enabled) { \ 290 if (VDSO_CURRENT_BASE) { \
291 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ 291 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
292 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ 292 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
293 } \ 293 } \
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
46}; 46};
47 47
48void kvm_page_track_init(struct kvm *kvm); 48void kvm_page_track_init(struct kvm *kvm);
49void kvm_page_track_cleanup(struct kvm *kvm);
49 50
50void kvm_page_track_free_memslot(struct kvm_memory_slot *free, 51void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
51 struct kvm_memory_slot *dont); 52 struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb4d737..529bb4a6487a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
55 * @size: number of bytes to write back 55 * @size: number of bytes to write back
56 * 56 *
57 * Write back a cache range using the CLWB (cache line write back) 57 * Write back a cache range using the CLWB (cache line write back)
58 * instruction. 58 * instruction. Note that @size is internally rounded up to be cache
59 * line size aligned.
59 */ 60 */
60static inline void arch_wb_cache_pmem(void *addr, size_t size) 61static inline void arch_wb_cache_pmem(void *addr, size_t size)
61{ 62{
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
69 clwb(p); 70 clwb(p);
70} 71}
71 72
72/*
73 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
74 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
75 */
76static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
77{
78 return iter_is_iovec(i) == false;
79}
80
81/** 73/**
82 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM 74 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
83 * @addr: PMEM destination address 75 * @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
94 /* TODO: skip the write-back by always using non-temporal stores */ 86 /* TODO: skip the write-back by always using non-temporal stores */
95 len = copy_from_iter_nocache(addr, bytes, i); 87 len = copy_from_iter_nocache(addr, bytes, i);
96 88
97 if (__iter_needs_pmem_wb(i)) 89 /*
90 * In the iovec case on x86_64 copy_from_iter_nocache() uses
91 * non-temporal stores for the bulk of the transfer, but we need
92 * to manually flush if the transfer is unaligned. A cached
93 * memory copy is used when destination or size is not naturally
94 * aligned. That is:
95 * - Require 8-byte alignment when size is 8 bytes or larger.
96 * - Require 4-byte alignment when size is 4 bytes.
97 *
98 * In the non-iovec case the entire destination needs to be
99 * flushed.
100 */
101 if (iter_is_iovec(i)) {
102 unsigned long flushed, dest = (unsigned long) addr;
103
104 if (bytes < 8) {
105 if (!IS_ALIGNED(dest, 4) || (bytes != 4))
106 arch_wb_cache_pmem(addr, 1);
107 } else {
108 if (!IS_ALIGNED(dest, 8)) {
109 dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
110 arch_wb_cache_pmem(addr, 1);
111 }
112
113 flushed = dest - (unsigned long) addr;
114 if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
115 arch_wb_cache_pmem(addr + bytes - 1, 1);
116 }
117 } else
98 arch_wb_cache_pmem(addr, bytes); 118 arch_wb_cache_pmem(addr, bytes);
99 119
100 return len; 120 return len;
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index a04eabd43d06..27e9f9d769b8 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
12 12
13extern int no_timer_check; 13extern int no_timer_check;
14 14
15extern bool using_native_sched_clock(void);
16
15/* 17/*
16 * We use the full linear equation: f(x) = a + b*x, in order to allow 18 * We use the full linear equation: f(x) = a + b*x, in order to allow
17 * a continuous function in the face of dynamic freq changes. 19 * a continuous function in the face of dynamic freq changes.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 72e8300b1e8a..9cffb44a3cf5 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
485 485
486 if (paddr < uv_hub_info->lowmem_remap_top) 486 if (paddr < uv_hub_info->lowmem_remap_top)
487 paddr |= uv_hub_info->lowmem_remap_base; 487 paddr |= uv_hub_info->lowmem_remap_base;
488 paddr |= uv_hub_info->gnode_upper; 488
489 if (m_val) 489 if (m_val) {
490 paddr |= uv_hub_info->gnode_upper;
490 paddr = ((paddr << uv_hub_info->m_shift) 491 paddr = ((paddr << uv_hub_info->m_shift)
491 >> uv_hub_info->m_shift) | 492 >> uv_hub_info->m_shift) |
492 ((paddr >> uv_hub_info->m_val) 493 ((paddr >> uv_hub_info->m_val)
493 << uv_hub_info->n_lshift); 494 << uv_hub_info->n_lshift);
494 else 495 } else {
495 paddr |= uv_soc_phys_ram_to_nasid(paddr) 496 paddr |= uv_soc_phys_ram_to_nasid(paddr)
496 << uv_hub_info->gpa_shift; 497 << uv_hub_info->gpa_shift;
498 }
497 return paddr; 499 return paddr;
498} 500}
499 501
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e9f8f8cdd570..86f20cc0a65e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi)
1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); 1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; 1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
1108 hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val; 1108 if (mn.m_val)
1109 hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
1109 1110
1110 if (uv_gp_table) { 1111 if (uv_gp_table) {
1111 hi->global_mmr_base = uv_gp_table->mmr_base; 1112 hi->global_mmr_base = uv_gp_table->mmr_base;
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index f369cb8db0d5..badd2b31a560 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
200 } 200 }
201 201
202out: 202out:
203 rdtgroup_kn_unlock(of->kn);
204 for_each_enabled_rdt_resource(r) { 203 for_each_enabled_rdt_resource(r) {
205 kfree(r->tmp_cbms); 204 kfree(r->tmp_cbms);
206 r->tmp_cbms = NULL; 205 r->tmp_cbms = NULL;
207 } 206 }
207 rdtgroup_kn_unlock(of->kn);
208 return ret ?: nbytes; 208 return ret ?: nbytes;
209} 209}
210 210
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 1e5a50c11d3c..217cd4449bc9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -85,7 +85,7 @@ void mce_gen_pool_process(struct work_struct *__unused)
85 head = llist_reverse_order(head); 85 head = llist_reverse_order(head);
86 llist_for_each_entry_safe(node, tmp, head, llnode) { 86 llist_for_each_entry_safe(node, tmp, head, llnode) {
87 mce = &node->mce; 87 mce = &node->mce;
88 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); 88 blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
89 gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); 89 gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
90 } 90 }
91} 91}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 903043e6a62b..19592ba1a320 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -13,7 +13,7 @@ enum severity_level {
13 MCE_PANIC_SEVERITY, 13 MCE_PANIC_SEVERITY,
14}; 14};
15 15
16extern struct atomic_notifier_head x86_mce_decoder_chain; 16extern struct blocking_notifier_head x86_mce_decoder_chain;
17 17
18#define ATTR_LEN 16 18#define ATTR_LEN 16
19#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */ 19#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8e9725c607ea..af44ebeb593f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,6 +54,8 @@
54 54
55static DEFINE_MUTEX(mce_chrdev_read_mutex); 55static DEFINE_MUTEX(mce_chrdev_read_mutex);
56 56
57static int mce_chrdev_open_count; /* #times opened */
58
57#define mce_log_get_idx_check(p) \ 59#define mce_log_get_idx_check(p) \
58({ \ 60({ \
59 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ 61 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -121,7 +123,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
121 * CPU/chipset specific EDAC code can register a notifier call here to print 123 * CPU/chipset specific EDAC code can register a notifier call here to print
122 * MCE errors in a human-readable form. 124 * MCE errors in a human-readable form.
123 */ 125 */
124ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); 126BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
125 127
126/* Do initial initialization of a struct mce */ 128/* Do initial initialization of a struct mce */
127void mce_setup(struct mce *m) 129void mce_setup(struct mce *m)
@@ -218,7 +220,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
218 220
219 WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC); 221 WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
220 222
221 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); 223 blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
222} 224}
223EXPORT_SYMBOL_GPL(mce_register_decode_chain); 225EXPORT_SYMBOL_GPL(mce_register_decode_chain);
224 226
@@ -226,7 +228,7 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
226{ 228{
227 atomic_dec(&num_notifiers); 229 atomic_dec(&num_notifiers);
228 230
229 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb); 231 blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
230} 232}
231EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); 233EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
232 234
@@ -319,18 +321,7 @@ static void __print_mce(struct mce *m)
319 321
320static void print_mce(struct mce *m) 322static void print_mce(struct mce *m)
321{ 323{
322 int ret = 0;
323
324 __print_mce(m); 324 __print_mce(m);
325
326 /*
327 * Print out human-readable details about the MCE error,
328 * (if the CPU has an implementation for that)
329 */
330 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
331 if (ret == NOTIFY_STOP)
332 return;
333
334 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); 325 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
335} 326}
336 327
@@ -598,6 +589,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
598 if (atomic_read(&num_notifiers) > 2) 589 if (atomic_read(&num_notifiers) > 2)
599 return NOTIFY_DONE; 590 return NOTIFY_DONE;
600 591
592 /* Don't print when mcelog is running */
593 if (mce_chrdev_open_count > 0)
594 return NOTIFY_DONE;
595
601 __print_mce(m); 596 __print_mce(m);
602 597
603 return NOTIFY_DONE; 598 return NOTIFY_DONE;
@@ -1828,7 +1823,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1828 */ 1823 */
1829 1824
1830static DEFINE_SPINLOCK(mce_chrdev_state_lock); 1825static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1831static int mce_chrdev_open_count; /* #times opened */
1832static int mce_chrdev_open_exclu; /* already open exclusive? */ 1826static int mce_chrdev_open_exclu; /* already open exclusive? */
1833 1827
1834static int mce_chrdev_open(struct inode *inode, struct file *file) 1828static int mce_chrdev_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 524cc5780a77..6e4a047e4b68 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -60,7 +60,7 @@ static const char * const th_names[] = {
60 "load_store", 60 "load_store",
61 "insn_fetch", 61 "insn_fetch",
62 "combined_unit", 62 "combined_unit",
63 "", 63 "decode_unit",
64 "northbridge", 64 "northbridge",
65 "execution_unit", 65 "execution_unit",
66}; 66};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8f3d9cf26ff9..cbd73eb42170 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,12 @@
29#include <asm/ftrace.h> 29#include <asm/ftrace.h>
30#include <asm/nops.h> 30#include <asm/nops.h>
31 31
32#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
33 !defined(CC_USING_FENTRY) && \
34 !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
35# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
36#endif
37
32#ifdef CONFIG_DYNAMIC_FTRACE 38#ifdef CONFIG_DYNAMIC_FTRACE
33 39
34int ftrace_arch_code_modify_prepare(void) 40int ftrace_arch_code_modify_prepare(void)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 396c042e9d0e..cc30a74e4adb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
846 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, 846 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
847 me->comm, me->pid, where, frame, 847 me->comm, me->pid, where, frame,
848 regs->ip, regs->sp, regs->orig_ax); 848 regs->ip, regs->sp, regs->orig_ax);
849 print_vma_addr(" in ", regs->ip); 849 print_vma_addr(KERN_CONT " in ", regs->ip);
850 pr_cont("\n"); 850 pr_cont("\n");
851 } 851 }
852 852
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756f9dc9..71beb28600d4 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
151 151
152 if (from->si_signo == SIGSEGV) { 152 if (from->si_signo == SIGSEGV) {
153 if (from->si_code == SEGV_BNDERR) { 153 if (from->si_code == SEGV_BNDERR) {
154 compat_uptr_t lower = (unsigned long)&to->si_lower; 154 compat_uptr_t lower = (unsigned long)from->si_lower;
155 compat_uptr_t upper = (unsigned long)&to->si_upper; 155 compat_uptr_t upper = (unsigned long)from->si_upper;
156 put_user_ex(lower, &to->si_lower); 156 put_user_ex(lower, &to->si_lower);
157 put_user_ex(upper, &to->si_upper); 157 put_user_ex(upper, &to->si_upper);
158 } 158 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 948443e115c1..4e496379a871 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
255 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", 255 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
256 tsk->comm, tsk->pid, str, 256 tsk->comm, tsk->pid, str,
257 regs->ip, regs->sp, error_code); 257 regs->ip, regs->sp, error_code);
258 print_vma_addr(" in ", regs->ip); 258 print_vma_addr(KERN_CONT " in ", regs->ip);
259 pr_cont("\n"); 259 pr_cont("\n");
260 } 260 }
261 261
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
519 pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", 519 pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
520 tsk->comm, task_pid_nr(tsk), 520 tsk->comm, task_pid_nr(tsk),
521 regs->ip, regs->sp, error_code); 521 regs->ip, regs->sp, error_code);
522 print_vma_addr(" in ", regs->ip); 522 print_vma_addr(KERN_CONT " in ", regs->ip);
523 pr_cont("\n"); 523 pr_cont("\n");
524 } 524 }
525 525
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c73a7f9e881a..714dfba6a1e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -328,7 +328,7 @@ unsigned long long sched_clock(void)
328 return paravirt_sched_clock(); 328 return paravirt_sched_clock();
329} 329}
330 330
331static inline bool using_native_sched_clock(void) 331bool using_native_sched_clock(void)
332{ 332{
333 return pv_time_ops.sched_clock == native_sched_clock; 333 return pv_time_ops.sched_clock == native_sched_clock;
334} 334}
@@ -336,7 +336,7 @@ static inline bool using_native_sched_clock(void)
336unsigned long long 336unsigned long long
337sched_clock(void) __attribute__((alias("native_sched_clock"))); 337sched_clock(void) __attribute__((alias("native_sched_clock")));
338 338
339static inline bool using_native_sched_clock(void) { return true; } 339bool using_native_sched_clock(void) { return true; }
340#endif 340#endif
341 341
342int check_tsc_unstable(void) 342int check_tsc_unstable(void)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..047b17a26269 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
657{ 657{
658 struct kvm_pic *vpic = kvm->arch.vpic; 658 struct kvm_pic *vpic = kvm->arch.vpic;
659 659
660 if (!vpic)
661 return;
662
660 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); 663 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
661 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); 664 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
662 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); 665 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5c07d2..289270a6aecb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
635{ 635{
636 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 636 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
637 637
638 if (!ioapic)
639 return;
640
638 cancel_delayed_work_sync(&ioapic->eoi_inject); 641 cancel_delayed_work_sync(&ioapic->eoi_inject);
639 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); 642 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
640 kvm->arch.vioapic = NULL; 643 kvm->arch.vioapic = NULL;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e419c32..60168cdd0546 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
160 return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); 160 return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
161} 161}
162 162
163void kvm_page_track_cleanup(struct kvm *kvm)
164{
165 struct kvm_page_track_notifier_head *head;
166
167 head = &kvm->arch.track_notifier_head;
168 cleanup_srcu_struct(&head->track_srcu);
169}
170
163void kvm_page_track_init(struct kvm *kvm) 171void kvm_page_track_init(struct kvm *kvm)
164{ 172{
165 struct kvm_page_track_notifier_head *head; 173 struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..5fba70646c32 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
1379 unsigned long flags; 1379 unsigned long flags;
1380 struct kvm_arch *vm_data = &kvm->arch; 1380 struct kvm_arch *vm_data = &kvm->arch;
1381 1381
1382 if (!avic)
1383 return;
1384
1382 avic_free_vm_id(vm_data->avic_vm_id); 1385 avic_free_vm_id(vm_data->avic_vm_id);
1383 1386
1384 if (vm_data->avic_logical_id_table_page) 1387 if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98e82ee1e699..259e9b28ccf8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
1239 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; 1239 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1240} 1240}
1241 1241
1242static inline bool cpu_has_vmx_invvpid(void)
1243{
1244 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1245}
1246
1242static inline bool cpu_has_vmx_ept(void) 1247static inline bool cpu_has_vmx_ept(void)
1243{ 1248{
1244 return vmcs_config.cpu_based_2nd_exec_ctrl & 1249 return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2753 SECONDARY_EXEC_RDTSCP | 2758 SECONDARY_EXEC_RDTSCP |
2754 SECONDARY_EXEC_DESC | 2759 SECONDARY_EXEC_DESC |
2755 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2760 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2756 SECONDARY_EXEC_ENABLE_VPID |
2757 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2761 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2758 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2762 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2759 SECONDARY_EXEC_WBINVD_EXITING | 2763 SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2781 * though it is treated as global context. The alternative is 2785 * though it is treated as global context. The alternative is
2782 * not failing the single-context invvpid, and it is worse. 2786 * not failing the single-context invvpid, and it is worse.
2783 */ 2787 */
2784 if (enable_vpid) 2788 if (enable_vpid) {
2789 vmx->nested.nested_vmx_secondary_ctls_high |=
2790 SECONDARY_EXEC_ENABLE_VPID;
2785 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | 2791 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2786 VMX_VPID_EXTENT_SUPPORTED_MASK; 2792 VMX_VPID_EXTENT_SUPPORTED_MASK;
2787 else 2793 } else
2788 vmx->nested.nested_vmx_vpid_caps = 0; 2794 vmx->nested.nested_vmx_vpid_caps = 0;
2789 2795
2790 if (enable_unrestricted_guest) 2796 if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
4024 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); 4030 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
4025} 4031}
4026 4032
4033static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
4034{
4035 if (enable_ept)
4036 vmx_flush_tlb(vcpu);
4037}
4038
4027static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 4039static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
4028{ 4040{
4029 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 4041 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
6517 if (boot_cpu_has(X86_FEATURE_NX)) 6529 if (boot_cpu_has(X86_FEATURE_NX))
6518 kvm_enable_efer_bits(EFER_NX); 6530 kvm_enable_efer_bits(EFER_NX);
6519 6531
6520 if (!cpu_has_vmx_vpid()) 6532 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
6533 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
6521 enable_vpid = 0; 6534 enable_vpid = 0;
6535
6522 if (!cpu_has_vmx_shadow_vmcs()) 6536 if (!cpu_has_vmx_shadow_vmcs())
6523 enable_shadow_vmcs = 0; 6537 enable_shadow_vmcs = 0;
6524 if (enable_shadow_vmcs) 6538 if (enable_shadow_vmcs)
@@ -8184,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
8184 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 8198 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
8185 case EXIT_REASON_PREEMPTION_TIMER: 8199 case EXIT_REASON_PREEMPTION_TIMER:
8186 return false; 8200 return false;
8201 case EXIT_REASON_PML_FULL:
8202 /* We don't expose PML support to L1. */
8203 return false;
8187 default: 8204 default:
8188 return true; 8205 return true;
8189 } 8206 }
@@ -8501,7 +8518,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8501 && kvm_vmx_exit_handlers[exit_reason]) 8518 && kvm_vmx_exit_handlers[exit_reason])
8502 return kvm_vmx_exit_handlers[exit_reason](vcpu); 8519 return kvm_vmx_exit_handlers[exit_reason](vcpu);
8503 else { 8520 else {
8504 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); 8521 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
8522 exit_reason);
8505 kvm_queue_exception(vcpu, UD_VECTOR); 8523 kvm_queue_exception(vcpu, UD_VECTOR);
8506 return 1; 8524 return 1;
8507 } 8525 }
@@ -8547,6 +8565,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
8547 } else { 8565 } else {
8548 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 8566 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
8549 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 8567 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8568 vmx_flush_tlb_ept_only(vcpu);
8550 } 8569 }
8551 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 8570 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
8552 8571
@@ -8572,8 +8591,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
8572 */ 8591 */
8573 if (!is_guest_mode(vcpu) || 8592 if (!is_guest_mode(vcpu) ||
8574 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 8593 !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
8575 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 8594 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
8576 vmcs_write64(APIC_ACCESS_ADDR, hpa); 8595 vmcs_write64(APIC_ACCESS_ADDR, hpa);
8596 vmx_flush_tlb_ept_only(vcpu);
8597 }
8577} 8598}
8578 8599
8579static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 8600static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9995,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
9974{ 9995{
9975 struct vcpu_vmx *vmx = to_vmx(vcpu); 9996 struct vcpu_vmx *vmx = to_vmx(vcpu);
9976 u32 exec_control; 9997 u32 exec_control;
9977 bool nested_ept_enabled = false;
9978 9998
9979 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 9999 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
9980 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 10000 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10141,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10121 vmcs12->guest_intr_status); 10141 vmcs12->guest_intr_status);
10122 } 10142 }
10123 10143
10124 nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
10125
10126 /* 10144 /*
10127 * Write an illegal value to APIC_ACCESS_ADDR. Later, 10145 * Write an illegal value to APIC_ACCESS_ADDR. Later,
10128 * nested_get_vmcs12_pages will either fix it up or 10146 * nested_get_vmcs12_pages will either fix it up or
@@ -10252,9 +10270,24 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10252 10270
10253 } 10271 }
10254 10272
10273 if (enable_pml) {
10274 /*
10275 * Conceptually we want to copy the PML address and index from
10276 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
10277 * since we always flush the log on each vmexit, this happens
10278 * to be equivalent to simply resetting the fields in vmcs02.
10279 */
10280 ASSERT(vmx->pml_pg);
10281 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
10282 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
10283 }
10284
10255 if (nested_cpu_has_ept(vmcs12)) { 10285 if (nested_cpu_has_ept(vmcs12)) {
10256 kvm_mmu_unload(vcpu); 10286 kvm_mmu_unload(vcpu);
10257 nested_ept_init_mmu_context(vcpu); 10287 nested_ept_init_mmu_context(vcpu);
10288 } else if (nested_cpu_has2(vmcs12,
10289 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
10290 vmx_flush_tlb_ept_only(vcpu);
10258 } 10291 }
10259 10292
10260 /* 10293 /*
@@ -10282,12 +10315,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10282 vmx_set_efer(vcpu, vcpu->arch.efer); 10315 vmx_set_efer(vcpu, vcpu->arch.efer);
10283 10316
10284 /* Shadow page tables on either EPT or shadow page tables. */ 10317 /* Shadow page tables on either EPT or shadow page tables. */
10285 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, 10318 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
10286 entry_failure_code)) 10319 entry_failure_code))
10287 return 1; 10320 return 1;
10288 10321
10289 kvm_mmu_reset_context(vcpu);
10290
10291 if (!enable_ept) 10322 if (!enable_ept)
10292 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 10323 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
10293 10324
@@ -11056,6 +11087,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
11056 vmx->nested.change_vmcs01_virtual_x2apic_mode = false; 11087 vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
11057 vmx_set_virtual_x2apic_mode(vcpu, 11088 vmx_set_virtual_x2apic_mode(vcpu,
11058 vcpu->arch.apic_base & X2APIC_ENABLE); 11089 vcpu->arch.apic_base & X2APIC_ENABLE);
11090 } else if (!nested_cpu_has_ept(vmcs12) &&
11091 nested_cpu_has2(vmcs12,
11092 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11093 vmx_flush_tlb_ept_only(vcpu);
11059 } 11094 }
11060 11095
11061 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 11096 /* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620a6fdc..ccbd45ecd41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
8153 if (kvm_x86_ops->vm_destroy) 8153 if (kvm_x86_ops->vm_destroy)
8154 kvm_x86_ops->vm_destroy(kvm); 8154 kvm_x86_ops->vm_destroy(kvm);
8155 kvm_iommu_unmap_guest(kvm); 8155 kvm_iommu_unmap_guest(kvm);
8156 kfree(kvm->arch.vpic); 8156 kvm_pic_destroy(kvm);
8157 kfree(kvm->arch.vioapic); 8157 kvm_ioapic_destroy(kvm);
8158 kvm_free_vcpus(kvm); 8158 kvm_free_vcpus(kvm);
8159 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 8159 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
8160 kvm_mmu_uninit_vm(kvm); 8160 kvm_mmu_uninit_vm(kvm);
8161 kvm_page_track_cleanup(kvm);
8161} 8162}
8162 8163
8163void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 8164void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8566{ 8567{
8567 struct x86_exception fault; 8568 struct x86_exception fault;
8568 8569
8569 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8570 if (work->wakeup_all) 8570 if (work->wakeup_all)
8571 work->arch.token = ~0; /* broadcast wakeup */ 8571 work->arch.token = ~0; /* broadcast wakeup */
8572 else 8572 else
8573 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 8573 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8574 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8574 8575
8575 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 8576 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
8576 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { 8577 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..9a53a06e5a3e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) 290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) 291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) 292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
293 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 293 _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) 295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) 296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912d66d2..889e7619a091 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
643 * devmem_is_allowed() checks to see if /dev/mem access to a certain address 643 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
644 * is valid. The argument is a physical page number. 644 * is valid. The argument is a physical page number.
645 * 645 *
646 * 646 * On x86, access has to be given to the first megabyte of RAM because that
647 * On x86, access has to be given to the first megabyte of ram because that area 647 * area traditionally contains BIOS code and data regions used by X, dosemu,
648 * contains BIOS code and data regions used by X and dosemu and similar apps. 648 * and similar apps. Since they map the entire memory range, the whole range
649 * Access has to be given to non-kernel-ram areas as well, these contain the PCI 649 * must be allowed (for mapping), but any areas that would otherwise be
650 * mmio resources as well as potential bios/acpi data regions. 650 * disallowed are flagged as being "zero filled" instead of rejected.
651 * Access has to be given to non-kernel-ram areas as well, these contain the
652 * PCI mmio resources as well as potential bios/acpi data regions.
651 */ 653 */
652int devmem_is_allowed(unsigned long pagenr) 654int devmem_is_allowed(unsigned long pagenr)
653{ 655{
654 if (pagenr < 256) 656 if (page_is_ram(pagenr)) {
655 return 1; 657 /*
656 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) 658 * For disallowed memory regions in the low 1MB range,
659 * request that the page be shown as all zeros.
660 */
661 if (pagenr < 256)
662 return 2;
663
664 return 0;
665 }
666
667 /*
668 * This must follow RAM test, since System RAM is considered a
669 * restricted resource under CONFIG_STRICT_IOMEM.
670 */
671 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
672 /* Low 1MB bypasses iomem restrictions. */
673 if (pagenr < 256)
674 return 1;
675
657 return 0; 676 return 0;
658 if (!page_is_ram(pagenr)) 677 }
659 return 1; 678
660 return 0; 679 return 1;
661} 680}
662 681
663void free_init_pages(char *what, unsigned long begin, unsigned long end) 682void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..aed206475aa7 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
48#if defined(CONFIG_X86_ESPFIX64) 48#if defined(CONFIG_X86_ESPFIX64)
49static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; 49static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
50#elif defined(CONFIG_EFI) 50#elif defined(CONFIG_EFI)
51static const unsigned long vaddr_end = EFI_VA_START; 51static const unsigned long vaddr_end = EFI_VA_END;
52#else 52#else
53static const unsigned long vaddr_end = __START_KERNEL_map; 53static const unsigned long vaddr_end = __START_KERNEL_map;
54#endif 54#endif
@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void)
105 */ 105 */
106 BUILD_BUG_ON(vaddr_start >= vaddr_end); 106 BUILD_BUG_ON(vaddr_start >= vaddr_end);
107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
108 vaddr_end >= EFI_VA_START); 108 vaddr_end >= EFI_VA_END);
109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || 109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
110 IS_ENABLED(CONFIG_EFI)) && 110 IS_ENABLED(CONFIG_EFI)) &&
111 vaddr_end >= __START_KERNEL_map); 111 vaddr_end >= __START_KERNEL_map);
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5293c4..cdfe8c628959 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
201 return; 201 return;
202 } 202 }
203 203
204 /* No need to reserve regions that will never be freed. */
205 if (md.attribute & EFI_MEMORY_RUNTIME)
206 return;
207
204 size += addr % EFI_PAGE_SIZE; 208 size += addr % EFI_PAGE_SIZE;
205 size = round_up(size, EFI_PAGE_SIZE); 209 size = round_up(size, EFI_PAGE_SIZE);
206 addr = round_down(addr, EFI_PAGE_SIZE); 210 addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 555b9fa0ad43..7dbdb780264d 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
8LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib 8LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
9targets += purgatory.ro 9targets += purgatory.ro
10 10
11KASAN_SANITIZE := n
11KCOV_INSTRUMENT := n 12KCOV_INSTRUMENT := n
12 13
13# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That 14# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d70edbc..4ddbfd57a7c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
164 164
165#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) 165#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
166 166
167#ifdef CONFIG_MMU
168static inline unsigned long ___pa(unsigned long va)
169{
170 unsigned long off = va - PAGE_OFFSET;
171
172 if (off >= XCHAL_KSEG_SIZE)
173 off -= XCHAL_KSEG_SIZE;
174
175 return off + PHYS_OFFSET;
176}
177#define __pa(x) ___pa((unsigned long)(x))
178#else
167#define __pa(x) \ 179#define __pa(x) \
168 ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) 180 ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
181#endif
169#define __va(x) \ 182#define __va(x) \
170 ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET)) 183 ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
171#define pfn_valid(pfn) \ 184#define pfn_valid(pfn) \
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index cd400af4a6b2..6be7eb27fd29 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
774#define __NR_pkey_free 350 774#define __NR_pkey_free 350
775__SYSCALL(350, sys_pkey_free, 1) 775__SYSCALL(350, sys_pkey_free, 1)
776 776
777#define __NR_syscall_count 351 777#define __NR_statx 351
778__SYSCALL(351, sys_statx, 5)
779
780#define __NR_syscall_count 352
778 781
779/* 782/*
780 * sysxtensa syscall handler 783 * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c82c43bff296..bae697a06a98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
483 483
484static int show_trace_cb(struct stackframe *frame, void *data) 484static int show_trace_cb(struct stackframe *frame, void *data)
485{ 485{
486 if (kernel_text_address(frame->pc)) { 486 if (kernel_text_address(frame->pc))
487 pr_cont(" [<%08lx>]", frame->pc); 487 pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
488 print_symbol(" %s\n", frame->pc);
489 }
490 return 0; 488 return 0;
491} 489}
492 490
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 09af8ff18719..c974a1bbf4cb 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
171 171
172void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) 172void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
173{ 173{
174 struct elevator_queue *e = hctx->queue->elevator; 174 struct request_queue *q = hctx->queue;
175 struct elevator_queue *e = q->elevator;
175 const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; 176 const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
176 bool did_work = false; 177 bool did_work = false;
177 LIST_HEAD(rq_list); 178 LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
203 */ 204 */
204 if (!list_empty(&rq_list)) { 205 if (!list_empty(&rq_list)) {
205 blk_mq_sched_mark_restart_hctx(hctx); 206 blk_mq_sched_mark_restart_hctx(hctx);
206 did_work = blk_mq_dispatch_rq_list(hctx, &rq_list); 207 did_work = blk_mq_dispatch_rq_list(q, &rq_list);
207 } else if (!has_sched_dispatch) { 208 } else if (!has_sched_dispatch) {
208 blk_mq_flush_busy_ctxs(hctx, &rq_list); 209 blk_mq_flush_busy_ctxs(hctx, &rq_list);
209 blk_mq_dispatch_rq_list(hctx, &rq_list); 210 blk_mq_dispatch_rq_list(q, &rq_list);
210 } 211 }
211 212
212 /* 213 /*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
222 if (!rq) 223 if (!rq)
223 break; 224 break;
224 list_add(&rq->queuelist, &rq_list); 225 list_add(&rq->queuelist, &rq_list);
225 } while (blk_mq_dispatch_rq_list(hctx, &rq_list)); 226 } while (blk_mq_dispatch_rq_list(q, &rq_list));
226 } 227 }
227} 228}
228 229
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
317 return true; 318 return true;
318} 319}
319 320
320static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) 321static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
321{ 322{
322 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { 323 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
323 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 324 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
324 if (blk_mq_hctx_has_pending(hctx)) 325 if (blk_mq_hctx_has_pending(hctx)) {
325 blk_mq_run_hw_queue(hctx, true); 326 blk_mq_run_hw_queue(hctx, true);
327 return true;
328 }
326 } 329 }
330 return false;
327} 331}
328 332
329void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx) 333/**
330{ 334 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
331 struct request_queue *q = hctx->queue; 335 * @pos: loop cursor.
332 unsigned int i; 336 * @skip: the list element that will not be examined. Iteration starts at
337 * @skip->next.
338 * @head: head of the list to examine. This list must have at least one
339 * element, namely @skip.
340 * @member: name of the list_head structure within typeof(*pos).
341 */
342#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
343 for ((pos) = (skip); \
344 (pos = (pos)->member.next != (head) ? list_entry_rcu( \
345 (pos)->member.next, typeof(*pos), member) : \
346 list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
347 (pos) != (skip); )
333 348
334 if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { 349/*
335 if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { 350 * Called after a driver tag has been freed to check whether a hctx needs to
336 queue_for_each_hw_ctx(q, hctx, i) 351 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
337 blk_mq_sched_restart_hctx(hctx); 352 * queues in a round-robin fashion if the tag set of @hctx is shared with other
353 * hardware queues.
354 */
355void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
356{
357 struct blk_mq_tags *const tags = hctx->tags;
358 struct blk_mq_tag_set *const set = hctx->queue->tag_set;
359 struct request_queue *const queue = hctx->queue, *q;
360 struct blk_mq_hw_ctx *hctx2;
361 unsigned int i, j;
362
363 if (set->flags & BLK_MQ_F_TAG_SHARED) {
364 rcu_read_lock();
365 list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
366 tag_set_list) {
367 queue_for_each_hw_ctx(q, hctx2, i)
368 if (hctx2->tags == tags &&
369 blk_mq_sched_restart_hctx(hctx2))
370 goto done;
371 }
372 j = hctx->queue_num + 1;
373 for (i = 0; i < queue->nr_hw_queues; i++, j++) {
374 if (j == queue->nr_hw_queues)
375 j = 0;
376 hctx2 = queue->queue_hw_ctx[j];
377 if (hctx2->tags == tags &&
378 blk_mq_sched_restart_hctx(hctx2))
379 break;
338 } 380 }
381done:
382 rcu_read_unlock();
339 } else { 383 } else {
340 blk_mq_sched_restart_hctx(hctx); 384 blk_mq_sched_restart_hctx(hctx);
341 } 385 }
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
431 } 475 }
432} 476}
433 477
434int blk_mq_sched_setup(struct request_queue *q) 478static int blk_mq_sched_alloc_tags(struct request_queue *q,
479 struct blk_mq_hw_ctx *hctx,
480 unsigned int hctx_idx)
481{
482 struct blk_mq_tag_set *set = q->tag_set;
483 int ret;
484
485 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
486 set->reserved_tags);
487 if (!hctx->sched_tags)
488 return -ENOMEM;
489
490 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
491 if (ret)
492 blk_mq_sched_free_tags(set, hctx, hctx_idx);
493
494 return ret;
495}
496
497static void blk_mq_sched_tags_teardown(struct request_queue *q)
435{ 498{
436 struct blk_mq_tag_set *set = q->tag_set; 499 struct blk_mq_tag_set *set = q->tag_set;
437 struct blk_mq_hw_ctx *hctx; 500 struct blk_mq_hw_ctx *hctx;
438 int ret, i; 501 int i;
502
503 queue_for_each_hw_ctx(q, hctx, i)
504 blk_mq_sched_free_tags(set, hctx, i);
505}
506
507int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
508 unsigned int hctx_idx)
509{
510 struct elevator_queue *e = q->elevator;
511
512 if (!e)
513 return 0;
514
515 return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
516}
517
518void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
519 unsigned int hctx_idx)
520{
521 struct elevator_queue *e = q->elevator;
522
523 if (!e)
524 return;
525
526 blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
527}
528
529int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
530{
531 struct blk_mq_hw_ctx *hctx;
532 unsigned int i;
533 int ret;
534
535 if (!e) {
536 q->elevator = NULL;
537 return 0;
538 }
439 539
440 /* 540 /*
441 * Default to 256, since we don't split into sync/async like the 541 * Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
443 */ 543 */
444 q->nr_requests = 2 * BLKDEV_MAX_RQ; 544 q->nr_requests = 2 * BLKDEV_MAX_RQ;
445 545
446 /*
447 * We're switching to using an IO scheduler, so setup the hctx
448 * scheduler tags and switch the request map from the regular
449 * tags to scheduler tags. First allocate what we need, so we
450 * can safely fail and fallback, if needed.
451 */
452 ret = 0;
453 queue_for_each_hw_ctx(q, hctx, i) { 546 queue_for_each_hw_ctx(q, hctx, i) {
454 hctx->sched_tags = blk_mq_alloc_rq_map(set, i, 547 ret = blk_mq_sched_alloc_tags(q, hctx, i);
455 q->nr_requests, set->reserved_tags);
456 if (!hctx->sched_tags) {
457 ret = -ENOMEM;
458 break;
459 }
460 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
461 if (ret) 548 if (ret)
462 break; 549 goto err;
463 } 550 }
464 551
465 /* 552 ret = e->ops.mq.init_sched(q, e);
466 * If we failed, free what we did allocate 553 if (ret)
467 */ 554 goto err;
468 if (ret) {
469 queue_for_each_hw_ctx(q, hctx, i) {
470 if (!hctx->sched_tags)
471 continue;
472 blk_mq_sched_free_tags(set, hctx, i);
473 }
474
475 return ret;
476 }
477 555
478 return 0; 556 return 0;
557
558err:
559 blk_mq_sched_tags_teardown(q);
560 q->elevator = NULL;
561 return ret;
479} 562}
480 563
481void blk_mq_sched_teardown(struct request_queue *q) 564void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
482{ 565{
483 struct blk_mq_tag_set *set = q->tag_set; 566 if (e->type->ops.mq.exit_sched)
484 struct blk_mq_hw_ctx *hctx; 567 e->type->ops.mq.exit_sched(e);
485 int i; 568 blk_mq_sched_tags_teardown(q);
486 569 q->elevator = NULL;
487 queue_for_each_hw_ctx(q, hctx, i)
488 blk_mq_sched_free_tags(set, hctx, i);
489} 570}
490 571
491int blk_mq_sched_init(struct request_queue *q) 572int blk_mq_sched_init(struct request_queue *q)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
19 struct request **merged_request); 19 struct request **merged_request);
20bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); 20bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
21bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); 21bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
22void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx); 22void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
23 23
24void blk_mq_sched_insert_request(struct request *rq, bool at_head, 24void blk_mq_sched_insert_request(struct request *rq, bool at_head,
25 bool run_queue, bool async, bool can_block); 25 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
32 struct list_head *rq_list, 32 struct list_head *rq_list,
33 struct request *(*get_rq)(struct blk_mq_hw_ctx *)); 33 struct request *(*get_rq)(struct blk_mq_hw_ctx *));
34 34
35int blk_mq_sched_setup(struct request_queue *q); 35int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
36void blk_mq_sched_teardown(struct request_queue *q); 36void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
37
38int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
39 unsigned int hctx_idx);
40void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
41 unsigned int hctx_idx);
37 42
38int blk_mq_sched_init(struct request_queue *q); 43int blk_mq_sched_init(struct request_queue *q);
39 44
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
131 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 136 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
132} 137}
133 138
134/*
135 * Mark a hardware queue and the request queue it belongs to as needing a
136 * restart.
137 */
138static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
139{
140 struct request_queue *q = hctx->queue;
141
142 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
143 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
144 if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
145 set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
146}
147
148static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) 139static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
149{ 140{
150 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 141 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a4546f060e80..c7836a1ded97 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
321 321
322 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); 322 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
323 323
324 blk_mq_put_ctx(alloc_data.ctx);
325 blk_queue_exit(q); 324 blk_queue_exit(q);
326 325
327 if (!rq) 326 if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
349 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); 348 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
350 if (sched_tag != -1) 349 if (sched_tag != -1)
351 blk_mq_sched_completed_request(hctx, rq); 350 blk_mq_sched_completed_request(hctx, rq);
352 blk_mq_sched_restart_queues(hctx); 351 blk_mq_sched_restart(hctx);
353 blk_queue_exit(q); 352 blk_queue_exit(q);
354} 353}
355 354
@@ -697,17 +696,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
697{ 696{
698 struct blk_mq_timeout_data *data = priv; 697 struct blk_mq_timeout_data *data = priv;
699 698
700 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 699 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
701 /*
702 * If a request wasn't started before the queue was
703 * marked dying, kill it here or it'll go unnoticed.
704 */
705 if (unlikely(blk_queue_dying(rq->q))) {
706 rq->errors = -EIO;
707 blk_mq_end_request(rq, rq->errors);
708 }
709 return; 700 return;
710 }
711 701
712 if (time_after_eq(jiffies, rq->deadline)) { 702 if (time_after_eq(jiffies, rq->deadline)) {
713 if (!blk_mark_rq_complete(rq)) 703 if (!blk_mark_rq_complete(rq))
@@ -855,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
855 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT, 845 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
856 }; 846 };
857 847
858 if (rq->tag != -1) { 848 if (rq->tag != -1)
859done: 849 goto done;
860 if (hctx)
861 *hctx = data.hctx;
862 return true;
863 }
864 850
865 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) 851 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
866 data.flags |= BLK_MQ_REQ_RESERVED; 852 data.flags |= BLK_MQ_REQ_RESERVED;
@@ -872,10 +858,12 @@ done:
872 atomic_inc(&data.hctx->nr_active); 858 atomic_inc(&data.hctx->nr_active);
873 } 859 }
874 data.hctx->tags->rqs[rq->tag] = rq; 860 data.hctx->tags->rqs[rq->tag] = rq;
875 goto done;
876 } 861 }
877 862
878 return false; 863done:
864 if (hctx)
865 *hctx = data.hctx;
866 return rq->tag != -1;
879} 867}
880 868
881static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, 869static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -972,13 +960,16 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
972 return true; 960 return true;
973} 961}
974 962
975bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) 963bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
976{ 964{
977 struct request_queue *q = hctx->queue; 965 struct blk_mq_hw_ctx *hctx;
978 struct request *rq; 966 struct request *rq;
979 LIST_HEAD(driver_list); 967 LIST_HEAD(driver_list);
980 struct list_head *dptr; 968 struct list_head *dptr;
981 int queued, ret = BLK_MQ_RQ_QUEUE_OK; 969 int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
970
971 if (list_empty(list))
972 return false;
982 973
983 /* 974 /*
984 * Start off with dptr being NULL, so we start the first request 975 * Start off with dptr being NULL, so we start the first request
@@ -989,8 +980,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
989 /* 980 /*
990 * Now process all the entries, sending them to the driver. 981 * Now process all the entries, sending them to the driver.
991 */ 982 */
992 queued = 0; 983 errors = queued = 0;
993 while (!list_empty(list)) { 984 do {
994 struct blk_mq_queue_data bd; 985 struct blk_mq_queue_data bd;
995 986
996 rq = list_first_entry(list, struct request, queuelist); 987 rq = list_first_entry(list, struct request, queuelist);
@@ -1046,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1046 default: 1037 default:
1047 pr_err("blk-mq: bad return on queue: %d\n", ret); 1038 pr_err("blk-mq: bad return on queue: %d\n", ret);
1048 case BLK_MQ_RQ_QUEUE_ERROR: 1039 case BLK_MQ_RQ_QUEUE_ERROR:
1040 errors++;
1049 rq->errors = -EIO; 1041 rq->errors = -EIO;
1050 blk_mq_end_request(rq, rq->errors); 1042 blk_mq_end_request(rq, rq->errors);
1051 break; 1043 break;
@@ -1060,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1060 */ 1052 */
1061 if (!dptr && list->next != list->prev) 1053 if (!dptr && list->next != list->prev)
1062 dptr = &driver_list; 1054 dptr = &driver_list;
1063 } 1055 } while (!list_empty(list));
1064 1056
1065 hctx->dispatched[queued_to_index(queued)]++; 1057 hctx->dispatched[queued_to_index(queued)]++;
1066 1058
@@ -1097,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1097 blk_mq_run_hw_queue(hctx, true); 1089 blk_mq_run_hw_queue(hctx, true);
1098 } 1090 }
1099 1091
1100 return queued != 0; 1092 return (queued + errors) != 0;
1101} 1093}
1102 1094
1103static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1095static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1143,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1143 return hctx->next_cpu; 1135 return hctx->next_cpu;
1144} 1136}
1145 1137
1146void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1138static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1139 unsigned long msecs)
1147{ 1140{
1148 if (unlikely(blk_mq_hctx_stopped(hctx) || 1141 if (unlikely(blk_mq_hctx_stopped(hctx) ||
1149 !blk_mq_hw_queue_mapped(hctx))) 1142 !blk_mq_hw_queue_mapped(hctx)))
@@ -1160,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1160 put_cpu(); 1153 put_cpu();
1161 } 1154 }
1162 1155
1163 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); 1156 if (msecs == 0)
1157 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
1158 &hctx->run_work);
1159 else
1160 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1161 &hctx->delayed_run_work,
1162 msecs_to_jiffies(msecs));
1163}
1164
1165void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1166{
1167 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1168}
1169EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1170
1171void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1172{
1173 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1164} 1174}
1165 1175
1166void blk_mq_run_hw_queues(struct request_queue *q, bool async) 1176void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -1263,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
1263 __blk_mq_run_hw_queue(hctx); 1273 __blk_mq_run_hw_queue(hctx);
1264} 1274}
1265 1275
1276static void blk_mq_delayed_run_work_fn(struct work_struct *work)
1277{
1278 struct blk_mq_hw_ctx *hctx;
1279
1280 hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
1281
1282 __blk_mq_run_hw_queue(hctx);
1283}
1284
1266static void blk_mq_delay_work_fn(struct work_struct *work) 1285static void blk_mq_delay_work_fn(struct work_struct *work)
1267{ 1286{
1268 struct blk_mq_hw_ctx *hctx; 1287 struct blk_mq_hw_ctx *hctx;
@@ -1932,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
1932 hctx->fq->flush_rq, hctx_idx, 1951 hctx->fq->flush_rq, hctx_idx,
1933 flush_start_tag + hctx_idx); 1952 flush_start_tag + hctx_idx);
1934 1953
1954 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1955
1935 if (set->ops->exit_hctx) 1956 if (set->ops->exit_hctx)
1936 set->ops->exit_hctx(hctx, hctx_idx); 1957 set->ops->exit_hctx(hctx, hctx_idx);
1937 1958
@@ -1968,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
1968 node = hctx->numa_node = set->numa_node; 1989 node = hctx->numa_node = set->numa_node;
1969 1990
1970 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); 1991 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1992 INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
1971 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); 1993 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1972 spin_lock_init(&hctx->lock); 1994 spin_lock_init(&hctx->lock);
1973 INIT_LIST_HEAD(&hctx->dispatch); 1995 INIT_LIST_HEAD(&hctx->dispatch);
@@ -1998,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
1998 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2020 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1999 goto free_bitmap; 2021 goto free_bitmap;
2000 2022
2023 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2024 goto exit_hctx;
2025
2001 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); 2026 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2002 if (!hctx->fq) 2027 if (!hctx->fq)
2003 goto exit_hctx; 2028 goto sched_exit_hctx;
2004 2029
2005 if (set->ops->init_request && 2030 if (set->ops->init_request &&
2006 set->ops->init_request(set->driver_data, 2031 set->ops->init_request(set->driver_data,
@@ -2015,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
2015 2040
2016 free_fq: 2041 free_fq:
2017 kfree(hctx->fq); 2042 kfree(hctx->fq);
2043 sched_exit_hctx:
2044 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2018 exit_hctx: 2045 exit_hctx:
2019 if (set->ops->exit_hctx) 2046 if (set->ops->exit_hctx)
2020 set->ops->exit_hctx(hctx, hctx_idx); 2047 set->ops->exit_hctx(hctx, hctx_idx);
@@ -2241,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
2241 struct blk_mq_hw_ctx *hctx; 2268 struct blk_mq_hw_ctx *hctx;
2242 unsigned int i; 2269 unsigned int i;
2243 2270
2244 blk_mq_sched_teardown(q);
2245
2246 /* hctx kobj stays in hctx */ 2271 /* hctx kobj stays in hctx */
2247 queue_for_each_hw_ctx(q, hctx, i) { 2272 queue_for_each_hw_ctx(q, hctx, i) {
2248 if (!hctx) 2273 if (!hctx)
@@ -2573,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2573 return 0; 2598 return 0;
2574} 2599}
2575 2600
2601static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2602{
2603 if (set->ops->map_queues)
2604 return set->ops->map_queues(set);
2605 else
2606 return blk_mq_map_queues(set);
2607}
2608
2576/* 2609/*
2577 * Alloc a tag set to be associated with one or more request queues. 2610 * Alloc a tag set to be associated with one or more request queues.
2578 * May fail with EINVAL for various error conditions. May adjust the 2611 * May fail with EINVAL for various error conditions. May adjust the
@@ -2627,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2627 if (!set->mq_map) 2660 if (!set->mq_map)
2628 goto out_free_tags; 2661 goto out_free_tags;
2629 2662
2630 if (set->ops->map_queues) 2663 ret = blk_mq_update_queue_map(set);
2631 ret = set->ops->map_queues(set);
2632 else
2633 ret = blk_mq_map_queues(set);
2634 if (ret) 2664 if (ret)
2635 goto out_free_mq_map; 2665 goto out_free_mq_map;
2636 2666
@@ -2722,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2722 blk_mq_freeze_queue(q); 2752 blk_mq_freeze_queue(q);
2723 2753
2724 set->nr_hw_queues = nr_hw_queues; 2754 set->nr_hw_queues = nr_hw_queues;
2755 blk_mq_update_queue_map(set);
2725 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2756 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2726 blk_mq_realloc_hw_ctxs(set, q); 2757 blk_mq_realloc_hw_ctxs(set, q);
2727 2758
@@ -2897,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2897 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 2928 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2898 if (!blk_qc_t_is_internal(cookie)) 2929 if (!blk_qc_t_is_internal(cookie))
2899 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 2930 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2900 else 2931 else {
2901 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 2932 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2933 /*
2934 * With scheduling, if the request has completed, we'll
2935 * get a NULL return here, as we clear the sched tag when
2936 * that happens. The request still remains valid, like always,
2937 * so we should be safe with just the NULL check.
2938 */
2939 if (!rq)
2940 return false;
2941 }
2902 2942
2903 return __blk_mq_poll(hctx, rq); 2943 return __blk_mq_poll(hctx, rq);
2904} 2944}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b79f9a7d8cf6..660a17e1d033 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
31void blk_mq_free_queue(struct request_queue *q); 31void blk_mq_free_queue(struct request_queue *q);
32int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 32int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
33void blk_mq_wake_waiters(struct request_queue *q); 33void blk_mq_wake_waiters(struct request_queue *q);
34bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *); 34bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
35void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); 35void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
36bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx); 36bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
37bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, 37bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb8933f..186fcb981e9b 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
30 30
31static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) 31static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
32{ 32{
33 blk_stat_flush_batch(src);
34
33 if (!src->nr_samples) 35 if (!src->nr_samples)
34 return; 36 return;
35 37
36 blk_stat_flush_batch(src);
37
38 dst->min = min(dst->min, src->min); 38 dst->min = min(dst->min, src->min);
39 dst->max = max(dst->max, src->max); 39 dst->max = max(dst->max, src->max);
40 40
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3..37f0b3ad635e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
816 816
817 if (q->elevator) { 817 if (q->elevator) {
818 ioc_clear_queue(q); 818 ioc_clear_queue(q);
819 elevator_exit(q->elevator); 819 elevator_exit(q, q->elevator);
820 } 820 }
821 821
822 blk_exit_rl(&q->root_rl); 822 blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index 01139f549b5b..4d9084a14c10 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
242 } 242 }
243 } 243 }
244 244
245 if (e->uses_mq) { 245 if (e->uses_mq)
246 err = blk_mq_sched_setup(q); 246 err = blk_mq_init_sched(q, e);
247 if (!err) 247 else
248 err = e->ops.mq.init_sched(q, e);
249 } else
250 err = e->ops.sq.elevator_init_fn(q, e); 248 err = e->ops.sq.elevator_init_fn(q, e);
251 if (err) { 249 if (err)
252 if (e->uses_mq)
253 blk_mq_sched_teardown(q);
254 elevator_put(e); 250 elevator_put(e);
255 }
256 return err; 251 return err;
257} 252}
258EXPORT_SYMBOL(elevator_init); 253EXPORT_SYMBOL(elevator_init);
259 254
260void elevator_exit(struct elevator_queue *e) 255void elevator_exit(struct request_queue *q, struct elevator_queue *e)
261{ 256{
262 mutex_lock(&e->sysfs_lock); 257 mutex_lock(&e->sysfs_lock);
263 if (e->uses_mq && e->type->ops.mq.exit_sched) 258 if (e->uses_mq && e->type->ops.mq.exit_sched)
264 e->type->ops.mq.exit_sched(e); 259 blk_mq_exit_sched(q, e);
265 else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) 260 else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
266 e->type->ops.sq.elevator_exit_fn(e); 261 e->type->ops.sq.elevator_exit_fn(e);
267 mutex_unlock(&e->sysfs_lock); 262 mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
946} 941}
947EXPORT_SYMBOL_GPL(elv_unregister); 942EXPORT_SYMBOL_GPL(elv_unregister);
948 943
944static int elevator_switch_mq(struct request_queue *q,
945 struct elevator_type *new_e)
946{
947 int ret;
948
949 blk_mq_freeze_queue(q);
950 blk_mq_quiesce_queue(q);
951
952 if (q->elevator) {
953 if (q->elevator->registered)
954 elv_unregister_queue(q);
955 ioc_clear_queue(q);
956 elevator_exit(q, q->elevator);
957 }
958
959 ret = blk_mq_init_sched(q, new_e);
960 if (ret)
961 goto out;
962
963 if (new_e) {
964 ret = elv_register_queue(q);
965 if (ret) {
966 elevator_exit(q, q->elevator);
967 goto out;
968 }
969 }
970
971 if (new_e)
972 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
973 else
974 blk_add_trace_msg(q, "elv switch: none");
975
976out:
977 blk_mq_unfreeze_queue(q);
978 blk_mq_start_stopped_hw_queues(q, true);
979 return ret;
980
981}
982
949/* 983/*
950 * switch to new_e io scheduler. be careful not to introduce deadlocks - 984 * switch to new_e io scheduler. be careful not to introduce deadlocks -
951 * we don't free the old io scheduler, before we have allocated what we 985 * we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
958 bool old_registered = false; 992 bool old_registered = false;
959 int err; 993 int err;
960 994
961 if (q->mq_ops) { 995 if (q->mq_ops)
962 blk_mq_freeze_queue(q); 996 return elevator_switch_mq(q, new_e);
963 blk_mq_quiesce_queue(q);
964 }
965 997
966 /* 998 /*
967 * Turn on BYPASS and drain all requests w/ elevator private data. 999 * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
973 if (old) { 1005 if (old) {
974 old_registered = old->registered; 1006 old_registered = old->registered;
975 1007
976 if (old->uses_mq) 1008 blk_queue_bypass_start(q);
977 blk_mq_sched_teardown(q);
978
979 if (!q->mq_ops)
980 blk_queue_bypass_start(q);
981 1009
982 /* unregister and clear all auxiliary data of the old elevator */ 1010 /* unregister and clear all auxiliary data of the old elevator */
983 if (old_registered) 1011 if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
987 } 1015 }
988 1016
989 /* allocate, init and register new elevator */ 1017 /* allocate, init and register new elevator */
990 if (new_e) { 1018 err = new_e->ops.sq.elevator_init_fn(q, new_e);
991 if (new_e->uses_mq) { 1019 if (err)
992 err = blk_mq_sched_setup(q); 1020 goto fail_init;
993 if (!err)
994 err = new_e->ops.mq.init_sched(q, new_e);
995 } else
996 err = new_e->ops.sq.elevator_init_fn(q, new_e);
997 if (err)
998 goto fail_init;
999 1021
1000 err = elv_register_queue(q); 1022 err = elv_register_queue(q);
1001 if (err) 1023 if (err)
1002 goto fail_register; 1024 goto fail_register;
1003 } else
1004 q->elevator = NULL;
1005 1025
1006 /* done, kill the old one and finish */ 1026 /* done, kill the old one and finish */
1007 if (old) { 1027 if (old) {
1008 elevator_exit(old); 1028 elevator_exit(q, old);
1009 if (!q->mq_ops) 1029 blk_queue_bypass_end(q);
1010 blk_queue_bypass_end(q);
1011 } 1030 }
1012 1031
1013 if (q->mq_ops) { 1032 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1014 blk_mq_unfreeze_queue(q);
1015 blk_mq_start_stopped_hw_queues(q, true);
1016 }
1017
1018 if (new_e)
1019 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1020 else
1021 blk_add_trace_msg(q, "elv switch: none");
1022 1033
1023 return 0; 1034 return 0;
1024 1035
1025fail_register: 1036fail_register:
1026 if (q->mq_ops) 1037 elevator_exit(q, q->elevator);
1027 blk_mq_sched_teardown(q);
1028 elevator_exit(q->elevator);
1029fail_init: 1038fail_init:
1030 /* switch failed, restore and re-register old elevator */ 1039 /* switch failed, restore and re-register old elevator */
1031 if (old) { 1040 if (old) {
1032 q->elevator = old; 1041 q->elevator = old;
1033 elv_register_queue(q); 1042 elv_register_queue(q);
1034 if (!q->mq_ops) 1043 blk_queue_bypass_end(q);
1035 blk_queue_bypass_end(q);
1036 }
1037 if (q->mq_ops) {
1038 blk_mq_unfreeze_queue(q);
1039 blk_mq_start_stopped_hw_queues(q, true);
1040 } 1044 }
1041 1045
1042 return err; 1046 return err;
@@ -1094,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
1094} 1098}
1095EXPORT_SYMBOL(elevator_change); 1099EXPORT_SYMBOL(elevator_change);
1096 1100
1101static inline bool elv_support_iosched(struct request_queue *q)
1102{
1103 if (q->mq_ops && q->tag_set && (q->tag_set->flags &
1104 BLK_MQ_F_NO_SCHED))
1105 return false;
1106 return true;
1107}
1108
1097ssize_t elv_iosched_store(struct request_queue *q, const char *name, 1109ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1098 size_t count) 1110 size_t count)
1099{ 1111{
1100 int ret; 1112 int ret;
1101 1113
1102 if (!(q->mq_ops || q->request_fn)) 1114 if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
1103 return count; 1115 return count;
1104 1116
1105 ret = __elevator_change(q, name); 1117 ret = __elevator_change(q, name);
@@ -1131,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
1131 len += sprintf(name+len, "[%s] ", elv->elevator_name); 1143 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1132 continue; 1144 continue;
1133 } 1145 }
1134 if (__e->uses_mq && q->mq_ops) 1146 if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
1135 len += sprintf(name+len, "%s ", __e->elevator_name); 1147 len += sprintf(name+len, "%s ", __e->elevator_name);
1136 else if (!__e->uses_mq && !q->mq_ops) 1148 else if (!__e->uses_mq && !q->mq_ops)
1137 len += sprintf(name+len, "%s ", __e->elevator_name); 1149 len += sprintf(name+len, "%s ", __e->elevator_name);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index e58c4970c22b..826cd7ab4d4a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -32,6 +32,7 @@ struct ahash_request_priv {
32 crypto_completion_t complete; 32 crypto_completion_t complete;
33 void *data; 33 void *data;
34 u8 *result; 34 u8 *result;
35 u32 flags;
35 void *ubuf[] CRYPTO_MINALIGN_ATTR; 36 void *ubuf[] CRYPTO_MINALIGN_ATTR;
36}; 37};
37 38
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
253 priv->result = req->result; 254 priv->result = req->result;
254 priv->complete = req->base.complete; 255 priv->complete = req->base.complete;
255 priv->data = req->base.data; 256 priv->data = req->base.data;
257 priv->flags = req->base.flags;
258
256 /* 259 /*
257 * WARNING: We do not backup req->priv here! The req->priv 260 * WARNING: We do not backup req->priv here! The req->priv
258 * is for internal use of the Crypto API and the 261 * is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
267 return 0; 270 return 0;
268} 271}
269 272
270static void ahash_restore_req(struct ahash_request *req) 273static void ahash_restore_req(struct ahash_request *req, int err)
271{ 274{
272 struct ahash_request_priv *priv = req->priv; 275 struct ahash_request_priv *priv = req->priv;
273 276
277 if (!err)
278 memcpy(priv->result, req->result,
279 crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
280
274 /* Restore the original crypto request. */ 281 /* Restore the original crypto request. */
275 req->result = priv->result; 282 req->result = priv->result;
276 req->base.complete = priv->complete; 283
277 req->base.data = priv->data; 284 ahash_request_set_callback(req, priv->flags,
285 priv->complete, priv->data);
278 req->priv = NULL; 286 req->priv = NULL;
279 287
280 /* Free the req->priv.priv from the ADJUSTED request. */ 288 /* Free the req->priv.priv from the ADJUSTED request. */
281 kzfree(priv); 289 kzfree(priv);
282} 290}
283 291
284static void ahash_op_unaligned_finish(struct ahash_request *req, int err) 292static void ahash_notify_einprogress(struct ahash_request *req)
285{ 293{
286 struct ahash_request_priv *priv = req->priv; 294 struct ahash_request_priv *priv = req->priv;
295 struct crypto_async_request oreq;
287 296
288 if (err == -EINPROGRESS) 297 oreq.data = priv->data;
289 return;
290
291 if (!err)
292 memcpy(priv->result, req->result,
293 crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
294 298
295 ahash_restore_req(req); 299 priv->complete(&oreq, -EINPROGRESS);
296} 300}
297 301
298static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) 302static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
299{ 303{
300 struct ahash_request *areq = req->data; 304 struct ahash_request *areq = req->data;
301 305
306 if (err == -EINPROGRESS) {
307 ahash_notify_einprogress(areq);
308 return;
309 }
310
302 /* 311 /*
303 * Restore the original request, see ahash_op_unaligned() for what 312 * Restore the original request, see ahash_op_unaligned() for what
304 * goes where. 313 * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
309 */ 318 */
310 319
311 /* First copy req->result into req->priv.result */ 320 /* First copy req->result into req->priv.result */
312 ahash_op_unaligned_finish(areq, err); 321 ahash_restore_req(areq, err);
313 322
314 /* Complete the ORIGINAL request. */ 323 /* Complete the ORIGINAL request. */
315 areq->base.complete(&areq->base, err); 324 areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
325 return err; 334 return err;
326 335
327 err = op(req); 336 err = op(req);
328 ahash_op_unaligned_finish(req, err); 337 if (err == -EINPROGRESS ||
338 (err == -EBUSY && (ahash_request_flags(req) &
339 CRYPTO_TFM_REQ_MAY_BACKLOG)))
340 return err;
341
342 ahash_restore_req(req, err);
329 343
330 return err; 344 return err;
331} 345}
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
360} 374}
361EXPORT_SYMBOL_GPL(crypto_ahash_digest); 375EXPORT_SYMBOL_GPL(crypto_ahash_digest);
362 376
363static void ahash_def_finup_finish2(struct ahash_request *req, int err) 377static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
364{ 378{
365 struct ahash_request_priv *priv = req->priv; 379 struct ahash_request *areq = req->data;
366 380
367 if (err == -EINPROGRESS) 381 if (err == -EINPROGRESS)
368 return; 382 return;
369 383
370 if (!err) 384 ahash_restore_req(areq, err);
371 memcpy(priv->result, req->result,
372 crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
373
374 ahash_restore_req(req);
375}
376
377static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
378{
379 struct ahash_request *areq = req->data;
380
381 ahash_def_finup_finish2(areq, err);
382 385
383 areq->base.complete(&areq->base, err); 386 areq->base.complete(&areq->base, err);
384} 387}
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
389 goto out; 392 goto out;
390 393
391 req->base.complete = ahash_def_finup_done2; 394 req->base.complete = ahash_def_finup_done2;
392 req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 395
393 err = crypto_ahash_reqtfm(req)->final(req); 396 err = crypto_ahash_reqtfm(req)->final(req);
397 if (err == -EINPROGRESS ||
398 (err == -EBUSY && (ahash_request_flags(req) &
399 CRYPTO_TFM_REQ_MAY_BACKLOG)))
400 return err;
394 401
395out: 402out:
396 ahash_def_finup_finish2(req, err); 403 ahash_restore_req(req, err);
397 return err; 404 return err;
398} 405}
399 406
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
401{ 408{
402 struct ahash_request *areq = req->data; 409 struct ahash_request *areq = req->data;
403 410
411 if (err == -EINPROGRESS) {
412 ahash_notify_einprogress(areq);
413 return;
414 }
415
416 areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
417
404 err = ahash_def_finup_finish1(areq, err); 418 err = ahash_def_finup_finish1(areq, err);
419 if (areq->priv)
420 return;
405 421
406 areq->base.complete(&areq->base, err); 422 areq->base.complete(&areq->base, err);
407} 423}
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
416 return err; 432 return err;
417 433
418 err = tfm->update(req); 434 err = tfm->update(req);
435 if (err == -EINPROGRESS ||
436 (err == -EBUSY && (ahash_request_flags(req) &
437 CRYPTO_TFM_REQ_MAY_BACKLOG)))
438 return err;
439
419 return ahash_def_finup_finish1(req, err); 440 return ahash_def_finup_finish1(req, err);
420} 441}
421 442
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 5a8053758657..ef59d9926ee9 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -40,6 +40,7 @@ struct aead_async_req {
40 struct aead_async_rsgl first_rsgl; 40 struct aead_async_rsgl first_rsgl;
41 struct list_head list; 41 struct list_head list;
42 struct kiocb *iocb; 42 struct kiocb *iocb;
43 struct sock *sk;
43 unsigned int tsgls; 44 unsigned int tsgls;
44 char iv[]; 45 char iv[];
45}; 46};
@@ -379,12 +380,10 @@ unlock:
379 380
380static void aead_async_cb(struct crypto_async_request *_req, int err) 381static void aead_async_cb(struct crypto_async_request *_req, int err)
381{ 382{
382 struct sock *sk = _req->data; 383 struct aead_request *req = _req->data;
383 struct alg_sock *ask = alg_sk(sk); 384 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
384 struct aead_ctx *ctx = ask->private;
385 struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
386 struct aead_request *req = aead_request_cast(_req);
387 struct aead_async_req *areq = GET_ASYM_REQ(req, tfm); 385 struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
386 struct sock *sk = areq->sk;
388 struct scatterlist *sg = areq->tsgl; 387 struct scatterlist *sg = areq->tsgl;
389 struct aead_async_rsgl *rsgl; 388 struct aead_async_rsgl *rsgl;
390 struct kiocb *iocb = areq->iocb; 389 struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
447 memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl)); 446 memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
448 INIT_LIST_HEAD(&areq->list); 447 INIT_LIST_HEAD(&areq->list);
449 areq->iocb = msg->msg_iocb; 448 areq->iocb = msg->msg_iocb;
449 areq->sk = sk;
450 memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); 450 memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
451 aead_request_set_tfm(req, tfm); 451 aead_request_set_tfm(req, tfm);
452 aead_request_set_ad(req, ctx->aead_assoclen); 452 aead_request_set_ad(req, ctx->aead_assoclen);
453 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 453 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
454 aead_async_cb, sk); 454 aead_async_cb, req);
455 used -= ctx->aead_assoclen; 455 used -= ctx->aead_assoclen;
456 456
457 /* take over all tx sgls from ctx */ 457 /* take over all tx sgls from ctx */
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ecd8474018e3..a8bfae4451bf 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
286 286
287 subreq->cryptlen = LRW_BUFFER_SIZE; 287 subreq->cryptlen = LRW_BUFFER_SIZE;
288 if (req->cryptlen > LRW_BUFFER_SIZE) { 288 if (req->cryptlen > LRW_BUFFER_SIZE) {
289 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 289 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
290 rctx->ext = kmalloc(subreq->cryptlen, gfp); 290
291 rctx->ext = kmalloc(n, gfp);
292 if (rctx->ext)
293 subreq->cryptlen = n;
291 } 294 }
292 295
293 rctx->src = req->src; 296 rctx->src = req->src;
@@ -342,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
342 struct rctx *rctx; 345 struct rctx *rctx;
343 346
344 rctx = skcipher_request_ctx(req); 347 rctx = skcipher_request_ctx(req);
348
349 if (err == -EINPROGRESS) {
350 if (rctx->left != req->cryptlen)
351 return;
352 goto out;
353 }
354
345 subreq = &rctx->subreq; 355 subreq = &rctx->subreq;
346 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 356 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
347 357
@@ -349,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
349 if (rctx->left) 359 if (rctx->left)
350 return; 360 return;
351 361
362out:
352 skcipher_request_complete(req, err); 363 skcipher_request_complete(req, err);
353} 364}
354 365
@@ -386,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
386 struct rctx *rctx; 397 struct rctx *rctx;
387 398
388 rctx = skcipher_request_ctx(req); 399 rctx = skcipher_request_ctx(req);
400
401 if (err == -EINPROGRESS) {
402 if (rctx->left != req->cryptlen)
403 return;
404 goto out;
405 }
406
389 subreq = &rctx->subreq; 407 subreq = &rctx->subreq;
390 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 408 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
391 409
@@ -393,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
393 if (rctx->left) 411 if (rctx->left)
394 return; 412 return;
395 413
414out:
396 skcipher_request_complete(req, err); 415 skcipher_request_complete(req, err);
397} 416}
398 417
diff --git a/crypto/xts.c b/crypto/xts.c
index baeb34dd8582..89ace5ebc2da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
230 230
231 subreq->cryptlen = XTS_BUFFER_SIZE; 231 subreq->cryptlen = XTS_BUFFER_SIZE;
232 if (req->cryptlen > XTS_BUFFER_SIZE) { 232 if (req->cryptlen > XTS_BUFFER_SIZE) {
233 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 233 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
234 rctx->ext = kmalloc(subreq->cryptlen, gfp); 234
235 rctx->ext = kmalloc(n, gfp);
236 if (rctx->ext)
237 subreq->cryptlen = n;
235 } 238 }
236 239
237 rctx->src = req->src; 240 rctx->src = req->src;
@@ -283,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
283 struct rctx *rctx; 286 struct rctx *rctx;
284 287
285 rctx = skcipher_request_ctx(req); 288 rctx = skcipher_request_ctx(req);
289
290 if (err == -EINPROGRESS) {
291 if (rctx->left != req->cryptlen)
292 return;
293 goto out;
294 }
295
286 subreq = &rctx->subreq; 296 subreq = &rctx->subreq;
287 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 297 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
288 298
@@ -290,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
290 if (rctx->left) 300 if (rctx->left)
291 return; 301 return;
292 302
303out:
293 skcipher_request_complete(req, err); 304 skcipher_request_complete(req, err);
294} 305}
295 306
@@ -327,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
327 struct rctx *rctx; 338 struct rctx *rctx;
328 339
329 rctx = skcipher_request_ctx(req); 340 rctx = skcipher_request_ctx(req);
341
342 if (err == -EINPROGRESS) {
343 if (rctx->left != req->cryptlen)
344 return;
345 goto out;
346 }
347
330 subreq = &rctx->subreq; 348 subreq = &rctx->subreq;
331 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 349 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
332 350
@@ -334,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
334 if (rctx->left) 352 if (rctx->left)
335 return; 353 return;
336 354
355out:
337 skcipher_request_complete(req, err); 356 skcipher_request_complete(req, err);
338} 357}
339 358
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a391bbc48105..d94f92f88ca1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the Linux ACPI interpreter 2# Makefile for the Linux ACPI interpreter
3# 3#
4 4
5ccflags-y := -Os
6ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT 5ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
7 6
8# 7#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a51da4..03250e1f1103 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
25ACPI_MODULE_NAME("platform"); 25ACPI_MODULE_NAME("platform");
26 26
27static const struct acpi_device_id forbidden_id_list[] = { 27static const struct acpi_device_id forbidden_id_list[] = {
28 {"PNP0000", 0}, /* PIC */ 28 {"PNP0000", 0}, /* PIC */
29 {"PNP0100", 0}, /* Timer */ 29 {"PNP0100", 0}, /* Timer */
30 {"PNP0200", 0}, /* AT DMA Controller */ 30 {"PNP0200", 0}, /* AT DMA Controller */
31 {"ACPI0009", 0}, /* IOxAPIC */
32 {"ACPI000A", 0}, /* IOAPIC */
31 {"", 0}, 33 {"", 0},
32}; 34};
33 35
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c86bae7b1d0f..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
421 421
422 ACPI_FUNCTION_TRACE(ut_walk_aml_resources); 422 ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
423 423
424 /* 424 /* The absolute minimum resource template is one end_tag descriptor */
425 * The absolute minimum resource template is one end_tag descriptor. 425
426 * However, we will treat a lone end_tag as just a simple buffer.
427 */
428 if (aml_length < sizeof(struct aml_resource_end_tag)) { 426 if (aml_length < sizeof(struct aml_resource_end_tag)) {
429 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 427 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
430 } 428 }
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
456 /* Invoke the user function */ 454 /* Invoke the user function */
457 455
458 if (user_function) { 456 if (user_function) {
459 status = user_function(aml, length, offset, 457 status =
460 resource_index, context); 458 user_function(aml, length, offset, resource_index,
459 context);
461 if (ACPI_FAILURE(status)) { 460 if (ACPI_FAILURE(status)) {
462 return_ACPI_STATUS(status); 461 return_ACPI_STATUS(status);
463 } 462 }
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
481 *context = aml; 480 *context = aml;
482 } 481 }
483 482
484 /* Check if buffer is defined to be longer than the resource length */
485
486 if (aml_length > (offset + length)) {
487 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
488 }
489
490 /* Normal exit */ 483 /* Normal exit */
491 484
492 return_ACPI_STATUS(AE_OK); 485 return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b192b42a8351..79b3c9c5a3bc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1073,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
1073 if (list_empty(&ghes_sci)) 1073 if (list_empty(&ghes_sci))
1074 unregister_acpi_hed_notifier(&ghes_notifier_sci); 1074 unregister_acpi_hed_notifier(&ghes_notifier_sci);
1075 mutex_unlock(&ghes_list_mutex); 1075 mutex_unlock(&ghes_list_mutex);
1076 synchronize_rcu();
1076 break; 1077 break;
1077 case ACPI_HEST_NOTIFY_NMI: 1078 case ACPI_HEST_NOTIFY_NMI:
1078 ghes_nmi_remove(ghes); 1079 ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index fb19e1cdb641..edc8663b5db3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
99 return -ENODEV; 99 return -ENODEV;
100 100
101 /* 101 /*
102 * If the device has a _HID (or _CID) returning a valid ACPI/PNP 102 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
103 * device ID, it is better to make it look less attractive here, so that 103 * better to make it look less attractive here, so that the other device
104 * the other device with the same _ADR value (that may not have a valid 104 * with the same _ADR value (that may not have a valid device ID) can be
105 * device ID) can be matched going forward. [This means a second spec 105 * matched going forward. [This means a second spec violation in a row,
106 * violation in a row, so whatever we do here is best effort anyway.] 106 * so whatever we do here is best effort anyway.]
107 */ 107 */
108 return sta_present && list_empty(&adev->pnp.ids) ? 108 return sta_present && !adev->pnp.type.platform_id ?
109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; 109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
110} 110}
111 111
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 1120dfd625b8..7e4fbf9a53a3 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
45 struct resource *res = data; 45 struct resource *res = data;
46 struct resource_win win; 46 struct resource_win win;
47 47
48 /*
49 * We might assign this to 'res' later, make sure all pointers are
50 * cleared before the resource is added to the global list
51 */
52 memset(&win, 0, sizeof(win));
53
48 res->flags = 0; 54 res->flags = 0;
49 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) 55 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
50 return AE_OK; 56 return AE_OK;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 662036bdc65e..c8ea9d698cd0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
1617 const struct nfit_set_info_map *map0 = m0; 1617 const struct nfit_set_info_map *map0 = m0;
1618 const struct nfit_set_info_map *map1 = m1; 1618 const struct nfit_set_info_map *map1 = m1;
1619 1619
1620 return map0->region_offset - map1->region_offset; 1620 if (map0->region_offset < map1->region_offset)
1621 return -1;
1622 else if (map0->region_offset > map1->region_offset)
1623 return 1;
1624 return 0;
1621} 1625}
1622 1626
1623/* Retrieve the nth entry referencing this spa */ 1627/* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6f78d5..1c2b846c5776 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
200 return -EINVAL; 200 return -EINVAL;
201 201
202 /* The state of the list is 'on' IFF all resources are 'on'. */ 202 /* The state of the list is 'on' IFF all resources are 'on'. */
203 cur_state = 0;
203 list_for_each_entry(entry, list, node) { 204 list_for_each_entry(entry, list, node) {
204 struct acpi_power_resource *resource = entry->resource; 205 struct acpi_power_resource *resource = entry->resource;
205 acpi_handle handle = resource->device.handle; 206 acpi_handle handle = resource->device.handle;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 192691880d55..2433569b02ef 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
1857 return; 1857 return;
1858 1858
1859 device->flags.match_driver = true; 1859 device->flags.match_driver = true;
1860 if (!ret) { 1860 if (ret > 0) {
1861 ret = device_attach(&device->dev); 1861 acpi_device_set_enumerated(device);
1862 if (ret < 0) 1862 goto ok;
1863 return;
1864
1865 if (!ret && device->pnp.type.platform_id)
1866 acpi_default_enumeration(device);
1867 } 1863 }
1868 1864
1865 ret = device_attach(&device->dev);
1866 if (ret < 0)
1867 return;
1868
1869 if (ret > 0 || !device->pnp.type.platform_id)
1870 acpi_device_set_enumerated(device);
1871 else
1872 acpi_default_enumeration(device);
1873
1869 ok: 1874 ok:
1870 list_for_each_entry(child, &device->children, node) 1875 list_for_each_entry(child, &device->children, node)
1871 acpi_bus_attach(child); 1876 acpi_bus_attach(child);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 01c94669a2b0..3afa8c1fa127 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
30 return true; 30 return true;
31 31
32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && 32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
33 h->oem_revision == 0) 33 h->oem_revision == 1)
34 return true; 34 return true;
35 35
36 return false; 36 return false;
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c9aa95a9a05..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
278 }; 278 };
279 const struct ata_port_info *ppi[] = { &info, &info }; 279 const struct ata_port_info *ppi[] = { &info, &info };
280 280
281 /* SB600/700 don't have secondary port wired */
282 if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
283 (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
284 ppi[1] = &ata_dummy_port_info;
285
286 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, 281 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
287 ATA_HOST_PARALLEL_SCAN); 282 ATA_HOST_PARALLEL_SCAN);
288} 283}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84fbefe..f3f538eec7b3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
644 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 644 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
645 } 645 }
646 646
647 /* enable IRQ on hotplug */ 647 if (board_id == vt6421) {
648 pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); 648 /* enable IRQ on hotplug */
649 if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { 649 pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
650 dev_dbg(&pdev->dev, 650 if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
651 "enabling SATA hotplug (0x%x)\n", 651 dev_dbg(&pdev->dev,
652 (int) tmp8); 652 "enabling SATA hotplug (0x%x)\n",
653 tmp8 |= SATA_HOTPLUG; 653 (int) tmp8);
654 pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); 654 tmp8 |= SATA_HOTPLUG;
655 pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
656 }
655 } 657 }
656 658
657 /* 659 /*
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d2aafc..83f1439e57fd 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
218 { .compatible = "img,boston-lcd", .data = &boston_config }, 218 { .compatible = "img,boston-lcd", .data = &boston_config },
219 { .compatible = "mti,malta-lcd", .data = &malta_config }, 219 { .compatible = "mti,malta-lcd", .data = &malta_config },
220 { .compatible = "mti,sead3-lcd", .data = &sead3_config }, 220 { .compatible = "mti,sead3-lcd", .data = &sead3_config },
221 { /* sentinel */ }
221}; 222};
222 223
223/** 224/**
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f96ab717534c..1d1dc11aa5fa 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
3969 dd->tags.reserved_tags = 1; 3969 dd->tags.reserved_tags = 1;
3970 dd->tags.cmd_size = sizeof(struct mtip_cmd); 3970 dd->tags.cmd_size = sizeof(struct mtip_cmd);
3971 dd->tags.numa_node = dd->numa_node; 3971 dd->tags.numa_node = dd->numa_node;
3972 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; 3972 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
3973 dd->tags.driver_data = dd; 3973 dd->tags.driver_data = dd;
3974 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS; 3974 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
3975 3975
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7e4287bc19e5..d8a23561b4cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex);
47struct nbd_sock { 47struct nbd_sock {
48 struct socket *sock; 48 struct socket *sock;
49 struct mutex tx_lock; 49 struct mutex tx_lock;
50 struct request *pending;
51 int sent;
50}; 52};
51 53
52#define NBD_TIMEDOUT 0 54#define NBD_TIMEDOUT 0
@@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd)
124 126
125static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 127static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
126{ 128{
127 bd_set_size(bdev, 0); 129 if (bdev->bd_openers <= 1)
130 bd_set_size(bdev, 0);
128 set_capacity(nbd->disk, 0); 131 set_capacity(nbd->disk, 0);
129 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 132 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
130 133
@@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
190 193
191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 194 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 195 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
193 req->errors++; 196 req->errors = -EIO;
194 197
195 mutex_lock(&nbd->config_lock); 198 mutex_lock(&nbd->config_lock);
196 sock_shutdown(nbd); 199 sock_shutdown(nbd);
@@ -202,7 +205,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
202 * Send or receive packet. 205 * Send or receive packet.
203 */ 206 */
204static int sock_xmit(struct nbd_device *nbd, int index, int send, 207static int sock_xmit(struct nbd_device *nbd, int index, int send,
205 struct iov_iter *iter, int msg_flags) 208 struct iov_iter *iter, int msg_flags, int *sent)
206{ 209{
207 struct socket *sock = nbd->socks[index]->sock; 210 struct socket *sock = nbd->socks[index]->sock;
208 int result; 211 int result;
@@ -237,6 +240,8 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
237 result = -EPIPE; /* short read */ 240 result = -EPIPE; /* short read */
238 break; 241 break;
239 } 242 }
243 if (sent)
244 *sent += result;
240 } while (msg_data_left(&msg)); 245 } while (msg_data_left(&msg));
241 246
242 tsk_restore_flags(current, pflags, PF_MEMALLOC); 247 tsk_restore_flags(current, pflags, PF_MEMALLOC);
@@ -248,6 +253,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
248static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 253static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
249{ 254{
250 struct request *req = blk_mq_rq_from_pdu(cmd); 255 struct request *req = blk_mq_rq_from_pdu(cmd);
256 struct nbd_sock *nsock = nbd->socks[index];
251 int result; 257 int result;
252 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 258 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
253 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 259 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
@@ -256,6 +262,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
256 struct bio *bio; 262 struct bio *bio;
257 u32 type; 263 u32 type;
258 u32 tag = blk_mq_unique_tag(req); 264 u32 tag = blk_mq_unique_tag(req);
265 int sent = nsock->sent, skip = 0;
259 266
260 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 267 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
261 268
@@ -283,6 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
283 return -EIO; 290 return -EIO;
284 } 291 }
285 292
293 /* We did a partial send previously, and we at least sent the whole
294 * request struct, so just go and send the rest of the pages in the
295 * request.
296 */
297 if (sent) {
298 if (sent >= sizeof(request)) {
299 skip = sent - sizeof(request);
300 goto send_pages;
301 }
302 iov_iter_advance(&from, sent);
303 }
286 request.type = htonl(type); 304 request.type = htonl(type);
287 if (type != NBD_CMD_FLUSH) { 305 if (type != NBD_CMD_FLUSH) {
288 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 306 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -294,15 +312,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
294 cmd, nbdcmd_to_ascii(type), 312 cmd, nbdcmd_to_ascii(type),
295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 313 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
296 result = sock_xmit(nbd, index, 1, &from, 314 result = sock_xmit(nbd, index, 1, &from,
297 (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 315 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
298 if (result <= 0) { 316 if (result <= 0) {
317 if (result == -ERESTARTSYS) {
318 /* If we havne't sent anything we can just return BUSY,
319 * however if we have sent something we need to make
320 * sure we only allow this req to be sent until we are
321 * completely done.
322 */
323 if (sent) {
324 nsock->pending = req;
325 nsock->sent = sent;
326 }
327 return BLK_MQ_RQ_QUEUE_BUSY;
328 }
299 dev_err_ratelimited(disk_to_dev(nbd->disk), 329 dev_err_ratelimited(disk_to_dev(nbd->disk),
300 "Send control failed (result %d)\n", result); 330 "Send control failed (result %d)\n", result);
301 return -EIO; 331 return -EIO;
302 } 332 }
303 333send_pages:
304 if (type != NBD_CMD_WRITE) 334 if (type != NBD_CMD_WRITE)
305 return 0; 335 goto out;
306 336
307 bio = req->bio; 337 bio = req->bio;
308 while (bio) { 338 while (bio) {
@@ -318,8 +348,25 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
318 cmd, bvec.bv_len); 348 cmd, bvec.bv_len);
319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 349 iov_iter_bvec(&from, ITER_BVEC | WRITE,
320 &bvec, 1, bvec.bv_len); 350 &bvec, 1, bvec.bv_len);
321 result = sock_xmit(nbd, index, 1, &from, flags); 351 if (skip) {
352 if (skip >= iov_iter_count(&from)) {
353 skip -= iov_iter_count(&from);
354 continue;
355 }
356 iov_iter_advance(&from, skip);
357 skip = 0;
358 }
359 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
322 if (result <= 0) { 360 if (result <= 0) {
361 if (result == -ERESTARTSYS) {
362 /* We've already sent the header, we
363 * have no choice but to set pending and
364 * return BUSY.
365 */
366 nsock->pending = req;
367 nsock->sent = sent;
368 return BLK_MQ_RQ_QUEUE_BUSY;
369 }
323 dev_err(disk_to_dev(nbd->disk), 370 dev_err(disk_to_dev(nbd->disk),
324 "Send data failed (result %d)\n", 371 "Send data failed (result %d)\n",
325 result); 372 result);
@@ -336,6 +383,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
336 } 383 }
337 bio = next; 384 bio = next;
338 } 385 }
386out:
387 nsock->pending = NULL;
388 nsock->sent = 0;
339 return 0; 389 return 0;
340} 390}
341 391
@@ -353,7 +403,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
353 403
354 reply.magic = 0; 404 reply.magic = 0;
355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 405 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
356 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 406 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
357 if (result <= 0) { 407 if (result <= 0) {
358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 408 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 409 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -383,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
383 if (ntohl(reply.error)) { 433 if (ntohl(reply.error)) {
384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 434 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
385 ntohl(reply.error)); 435 ntohl(reply.error));
386 req->errors++; 436 req->errors = -EIO;
387 return cmd; 437 return cmd;
388 } 438 }
389 439
@@ -395,11 +445,11 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
395 rq_for_each_segment(bvec, req, iter) { 445 rq_for_each_segment(bvec, req, iter) {
396 iov_iter_bvec(&to, ITER_BVEC | READ, 446 iov_iter_bvec(&to, ITER_BVEC | READ,
397 &bvec, 1, bvec.bv_len); 447 &bvec, 1, bvec.bv_len);
398 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 448 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
399 if (result <= 0) { 449 if (result <= 0) {
400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 450 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
401 result); 451 result);
402 req->errors++; 452 req->errors = -EIO;
403 return cmd; 453 return cmd;
404 } 454 }
405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 455 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
@@ -469,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
469 if (!blk_mq_request_started(req)) 519 if (!blk_mq_request_started(req))
470 return; 520 return;
471 cmd = blk_mq_rq_to_pdu(req); 521 cmd = blk_mq_rq_to_pdu(req);
472 req->errors++; 522 req->errors = -EIO;
473 nbd_end_request(cmd); 523 nbd_end_request(cmd);
474} 524}
475 525
@@ -482,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd)
482} 532}
483 533
484 534
485static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 535static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
486{ 536{
487 struct request *req = blk_mq_rq_from_pdu(cmd); 537 struct request *req = blk_mq_rq_from_pdu(cmd);
488 struct nbd_device *nbd = cmd->nbd; 538 struct nbd_device *nbd = cmd->nbd;
489 struct nbd_sock *nsock; 539 struct nbd_sock *nsock;
540 int ret;
490 541
491 if (index >= nbd->num_connections) { 542 if (index >= nbd->num_connections) {
492 dev_err_ratelimited(disk_to_dev(nbd->disk), 543 dev_err_ratelimited(disk_to_dev(nbd->disk),
493 "Attempted send on invalid socket\n"); 544 "Attempted send on invalid socket\n");
494 goto error_out; 545 return -EINVAL;
495 } 546 }
496 547
497 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 548 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
498 dev_err_ratelimited(disk_to_dev(nbd->disk), 549 dev_err_ratelimited(disk_to_dev(nbd->disk),
499 "Attempted send on closed socket\n"); 550 "Attempted send on closed socket\n");
500 goto error_out; 551 return -EINVAL;
501 } 552 }
502 553
503 req->errors = 0; 554 req->errors = 0;
@@ -508,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
508 mutex_unlock(&nsock->tx_lock); 559 mutex_unlock(&nsock->tx_lock);
509 dev_err_ratelimited(disk_to_dev(nbd->disk), 560 dev_err_ratelimited(disk_to_dev(nbd->disk),
510 "Attempted send on closed socket\n"); 561 "Attempted send on closed socket\n");
511 goto error_out; 562 return -EINVAL;
512 } 563 }
513 564
514 if (nbd_send_cmd(nbd, cmd, index) != 0) { 565 /* Handle the case that we have a pending request that was partially
515 dev_err_ratelimited(disk_to_dev(nbd->disk), 566 * transmitted that _has_ to be serviced first. We need to call requeue
516 "Request send failed\n"); 567 * here so that it gets put _after_ the request that is already on the
517 req->errors++; 568 * dispatch list.
518 nbd_end_request(cmd); 569 */
570 if (unlikely(nsock->pending && nsock->pending != req)) {
571 blk_mq_requeue_request(req, true);
572 ret = 0;
573 goto out;
519 } 574 }
520 575 ret = nbd_send_cmd(nbd, cmd, index);
576out:
521 mutex_unlock(&nsock->tx_lock); 577 mutex_unlock(&nsock->tx_lock);
522 578 return ret;
523 return;
524
525error_out:
526 req->errors++;
527 nbd_end_request(cmd);
528} 579}
529 580
530static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 581static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
531 const struct blk_mq_queue_data *bd) 582 const struct blk_mq_queue_data *bd)
532{ 583{
533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 584 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
585 int ret;
534 586
535 /* 587 /*
536 * Since we look at the bio's to send the request over the network we 588 * Since we look at the bio's to send the request over the network we
@@ -543,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
543 */ 595 */
544 init_completion(&cmd->send_complete); 596 init_completion(&cmd->send_complete);
545 blk_mq_start_request(bd->rq); 597 blk_mq_start_request(bd->rq);
546 nbd_handle_cmd(cmd, hctx->queue_num); 598
599 /* We can be called directly from the user space process, which means we
600 * could possibly have signals pending so our sendmsg will fail. In
601 * this case we need to return that we are busy, otherwise error out as
602 * appropriate.
603 */
604 ret = nbd_handle_cmd(cmd, hctx->queue_num);
605 if (ret < 0)
606 ret = BLK_MQ_RQ_QUEUE_ERROR;
607 if (!ret)
608 ret = BLK_MQ_RQ_QUEUE_OK;
547 complete(&cmd->send_complete); 609 complete(&cmd->send_complete);
548 610
549 return BLK_MQ_RQ_QUEUE_OK; 611 return ret;
550} 612}
551 613
552static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, 614static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
@@ -581,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
581 643
582 mutex_init(&nsock->tx_lock); 644 mutex_init(&nsock->tx_lock);
583 nsock->sock = sock; 645 nsock->sock = sock;
646 nsock->pending = NULL;
647 nsock->sent = 0;
584 socks[nbd->num_connections++] = nsock; 648 socks[nbd->num_connections++] = nsock;
585 649
586 if (max_part) 650 if (max_part)
@@ -602,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd)
602 666
603static void nbd_bdev_reset(struct block_device *bdev) 667static void nbd_bdev_reset(struct block_device *bdev)
604{ 668{
669 if (bdev->bd_openers > 1)
670 return;
605 set_device_ro(bdev, false); 671 set_device_ro(bdev, false);
606 bdev->bd_inode->i_size = 0; 672 bdev->bd_inode->i_size = 0;
607 if (max_part > 0) { 673 if (max_part > 0) {
@@ -634,7 +700,7 @@ static void send_disconnects(struct nbd_device *nbd)
634 700
635 for (i = 0; i < nbd->num_connections; i++) { 701 for (i = 0; i < nbd->num_connections; i++) {
636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 702 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
637 ret = sock_xmit(nbd, i, 1, &from, 0); 703 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
638 if (ret <= 0) 704 if (ret <= 0)
639 dev_err(disk_to_dev(nbd->disk), 705 dev_err(disk_to_dev(nbd->disk),
640 "Send disconnect failed %d\n", ret); 706 "Send disconnect failed %d\n", ret);
@@ -665,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
665{ 731{
666 sock_shutdown(nbd); 732 sock_shutdown(nbd);
667 nbd_clear_que(nbd); 733 nbd_clear_que(nbd);
668 kill_bdev(bdev); 734
735 __invalidate_device(bdev, true);
669 nbd_bdev_reset(bdev); 736 nbd_bdev_reset(bdev);
670 /* 737 /*
671 * We want to give the run thread a chance to wait for everybody 738 * We want to give the run thread a chance to wait for everybody
@@ -781,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 848 nbd_size_set(nbd, bdev, nbd->blksize, arg);
782 return 0; 849 return 0;
783 case NBD_SET_TIMEOUT: 850 case NBD_SET_TIMEOUT:
784 nbd->tag_set.timeout = arg * HZ; 851 if (arg) {
852 nbd->tag_set.timeout = arg * HZ;
853 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
854 }
785 return 0; 855 return 0;
786 856
787 case NBD_SET_FLAGS: 857 case NBD_SET_FLAGS:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dceb5edd1e54..0c09d4256108 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
523 523
524 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); 524 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
525 if (size == PAGE_SIZE) { 525 if (size == PAGE_SIZE) {
526 copy_page(mem, cmem); 526 memcpy(mem, cmem, PAGE_SIZE);
527 } else { 527 } else {
528 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); 528 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
529 529
@@ -717,7 +717,7 @@ compress_again:
717 717
718 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { 718 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
719 src = kmap_atomic(page); 719 src = kmap_atomic(page);
720 copy_page(cmem, src); 720 memcpy(cmem, src, PAGE_SIZE);
721 kunmap_atomic(src); 721 kunmap_atomic(src);
722 } else { 722 } else {
723 memcpy(cmem, src, clen); 723 memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
928 } 928 }
929 929
930 index = sector >> SECTORS_PER_PAGE_SHIFT; 930 index = sector >> SECTORS_PER_PAGE_SHIFT;
931 offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; 931 offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
932 932
933 bv.bv_page = page; 933 bv.bv_page = page;
934 bv.bv_len = PAGE_SIZE; 934 bv.bv_len = PAGE_SIZE;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index c2c14a12713b..08e054507d0b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -344,7 +344,8 @@ config BT_WILINK
344 344
345config BT_QCOMSMD 345config BT_QCOMSMD
346 tristate "Qualcomm SMD based HCI support" 346 tristate "Qualcomm SMD based HCI support"
347 depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST 347 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
348 depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
348 select BT_QCA 349 select BT_QCA
349 help 350 help
350 Qualcomm SMD based HCI driver. 351 Qualcomm SMD based HCI driver.
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac756f08..9959c762da2f 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
55struct amd768_priv { 55struct amd768_priv {
56 void __iomem *iobase; 56 void __iomem *iobase;
57 struct pci_dev *pcidev; 57 struct pci_dev *pcidev;
58 u32 pmbase;
58}; 59};
59 60
60static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) 61static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
148 if (pmbase == 0) 149 if (pmbase == 0)
149 return -EIO; 150 return -EIO;
150 151
151 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 152 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
152 if (!priv) 153 if (!priv)
153 return -ENOMEM; 154 return -ENOMEM;
154 155
155 if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, 156 if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
156 PMBASE_SIZE, DRV_NAME)) {
157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", 157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
158 pmbase + 0xF0); 158 pmbase + 0xF0);
159 return -EBUSY; 159 err = -EBUSY;
160 goto out;
160 } 161 }
161 162
162 priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, 163 priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
163 PMBASE_SIZE);
164 if (!priv->iobase) { 164 if (!priv->iobase) {
165 pr_err(DRV_NAME "Cannot map ioport\n"); 165 pr_err(DRV_NAME "Cannot map ioport\n");
166 return -ENOMEM; 166 err = -EINVAL;
167 goto err_iomap;
167 } 168 }
168 169
169 amd_rng.priv = (unsigned long)priv; 170 amd_rng.priv = (unsigned long)priv;
171 priv->pmbase = pmbase;
170 priv->pcidev = pdev; 172 priv->pcidev = pdev;
171 173
172 pr_info(DRV_NAME " detected\n"); 174 pr_info(DRV_NAME " detected\n");
173 return devm_hwrng_register(&pdev->dev, &amd_rng); 175 err = hwrng_register(&amd_rng);
176 if (err) {
177 pr_err(DRV_NAME " registering failed (%d)\n", err);
178 goto err_hwrng;
179 }
180 return 0;
181
182err_hwrng:
183 ioport_unmap(priv->iobase);
184err_iomap:
185 release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
186out:
187 kfree(priv);
188 return err;
174} 189}
175 190
176static void __exit mod_exit(void) 191static void __exit mod_exit(void)
177{ 192{
193 struct amd768_priv *priv;
194
195 priv = (struct amd768_priv *)amd_rng.priv;
196
197 hwrng_unregister(&amd_rng);
198
199 ioport_unmap(priv->iobase);
200
201 release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
202
203 kfree(priv);
178} 204}
179 205
180module_init(mod_init); 206module_init(mod_init);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a245942029..e1d421a36a13 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33 33
34
35#define PFX KBUILD_MODNAME ": "
36
34#define GEODE_RNG_DATA_REG 0x50 37#define GEODE_RNG_DATA_REG 0x50
35#define GEODE_RNG_STATUS_REG 0x54 38#define GEODE_RNG_STATUS_REG 0x54
36 39
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
82 85
83static int __init mod_init(void) 86static int __init mod_init(void)
84{ 87{
88 int err = -ENODEV;
85 struct pci_dev *pdev = NULL; 89 struct pci_dev *pdev = NULL;
86 const struct pci_device_id *ent; 90 const struct pci_device_id *ent;
87 void __iomem *mem; 91 void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
89 93
90 for_each_pci_dev(pdev) { 94 for_each_pci_dev(pdev) {
91 ent = pci_match_id(pci_tbl, pdev); 95 ent = pci_match_id(pci_tbl, pdev);
92 if (ent) { 96 if (ent)
93 rng_base = pci_resource_start(pdev, 0); 97 goto found;
94 if (rng_base == 0)
95 return -ENODEV;
96
97 mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
98 if (!mem)
99 return -ENOMEM;
100 geode_rng.priv = (unsigned long)mem;
101
102 pr_info("AMD Geode RNG detected\n");
103 return devm_hwrng_register(&pdev->dev, &geode_rng);
104 }
105 } 98 }
106
107 /* Device not found. */ 99 /* Device not found. */
108 return -ENODEV; 100 goto out;
101
102found:
103 rng_base = pci_resource_start(pdev, 0);
104 if (rng_base == 0)
105 goto out;
106 err = -ENOMEM;
107 mem = ioremap(rng_base, 0x58);
108 if (!mem)
109 goto out;
110 geode_rng.priv = (unsigned long)mem;
111
112 pr_info("AMD Geode RNG detected\n");
113 err = hwrng_register(&geode_rng);
114 if (err) {
115 pr_err(PFX "RNG registering failed (%d)\n",
116 err);
117 goto err_unmap;
118 }
119out:
120 return err;
121
122err_unmap:
123 iounmap(mem);
124 goto out;
109} 125}
110 126
111static void __exit mod_exit(void) 127static void __exit mod_exit(void)
112{ 128{
129 void __iomem *mem = (void __iomem *)geode_rng.priv;
130
131 hwrng_unregister(&geode_rng);
132 iounmap(mem);
113} 133}
114 134
115module_init(mod_init); 135module_init(mod_init);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
60#endif 60#endif
61 61
62#ifdef CONFIG_STRICT_DEVMEM 62#ifdef CONFIG_STRICT_DEVMEM
63static inline int page_is_allowed(unsigned long pfn)
64{
65 return devmem_is_allowed(pfn);
66}
63static inline int range_is_allowed(unsigned long pfn, unsigned long size) 67static inline int range_is_allowed(unsigned long pfn, unsigned long size)
64{ 68{
65 u64 from = ((u64)pfn) << PAGE_SHIFT; 69 u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
75 return 1; 79 return 1;
76} 80}
77#else 81#else
82static inline int page_is_allowed(unsigned long pfn)
83{
84 return 1;
85}
78static inline int range_is_allowed(unsigned long pfn, unsigned long size) 86static inline int range_is_allowed(unsigned long pfn, unsigned long size)
79{ 87{
80 return 1; 88 return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
122 130
123 while (count > 0) { 131 while (count > 0) {
124 unsigned long remaining; 132 unsigned long remaining;
133 int allowed;
125 134
126 sz = size_inside_page(p, count); 135 sz = size_inside_page(p, count);
127 136
128 if (!range_is_allowed(p >> PAGE_SHIFT, count)) 137 allowed = page_is_allowed(p >> PAGE_SHIFT);
138 if (!allowed)
129 return -EPERM; 139 return -EPERM;
140 if (allowed == 2) {
141 /* Show zeros for restricted memory. */
142 remaining = clear_user(buf, sz);
143 } else {
144 /*
145 * On ia64 if a page has been mapped somewhere as
146 * uncached, then it must also be accessed uncached
147 * by the kernel or data corruption may occur.
148 */
149 ptr = xlate_dev_mem_ptr(p);
150 if (!ptr)
151 return -EFAULT;
130 152
131 /* 153 remaining = copy_to_user(buf, ptr, sz);
132 * On ia64 if a page has been mapped somewhere as uncached, then 154
133 * it must also be accessed uncached by the kernel or data 155 unxlate_dev_mem_ptr(p, ptr);
134 * corruption may occur. 156 }
135 */
136 ptr = xlate_dev_mem_ptr(p);
137 if (!ptr)
138 return -EFAULT;
139 157
140 remaining = copy_to_user(buf, ptr, sz);
141 unxlate_dev_mem_ptr(p, ptr);
142 if (remaining) 158 if (remaining)
143 return -EFAULT; 159 return -EFAULT;
144 160
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
181#endif 197#endif
182 198
183 while (count > 0) { 199 while (count > 0) {
200 int allowed;
201
184 sz = size_inside_page(p, count); 202 sz = size_inside_page(p, count);
185 203
186 if (!range_is_allowed(p >> PAGE_SHIFT, sz)) 204 allowed = page_is_allowed(p >> PAGE_SHIFT);
205 if (!allowed)
187 return -EPERM; 206 return -EPERM;
188 207
189 /* 208 /* Skip actual writing when a page is marked as restricted. */
190 * On ia64 if a page has been mapped somewhere as uncached, then 209 if (allowed == 1) {
191 * it must also be accessed uncached by the kernel or data 210 /*
192 * corruption may occur. 211 * On ia64 if a page has been mapped somewhere as
193 */ 212 * uncached, then it must also be accessed uncached
194 ptr = xlate_dev_mem_ptr(p); 213 * by the kernel or data corruption may occur.
195 if (!ptr) { 214 */
196 if (written) 215 ptr = xlate_dev_mem_ptr(p);
197 break; 216 if (!ptr) {
198 return -EFAULT; 217 if (written)
199 } 218 break;
219 return -EFAULT;
220 }
200 221
201 copied = copy_from_user(ptr, buf, sz); 222 copied = copy_from_user(ptr, buf, sz);
202 unxlate_dev_mem_ptr(p, ptr); 223 unxlate_dev_mem_ptr(p, ptr);
203 if (copied) { 224 if (copied) {
204 written += sz - copied; 225 written += sz - copied;
205 if (written) 226 if (written)
206 break; 227 break;
207 return -EFAULT; 228 return -EFAULT;
229 }
208 } 230 }
209 231
210 buf += sz; 232 buf += sz;
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2a558c706581..3e73bcdf9e65 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -84,11 +84,14 @@ struct pp_struct {
84 struct ieee1284_info state; 84 struct ieee1284_info state;
85 struct ieee1284_info saved_state; 85 struct ieee1284_info saved_state;
86 long default_inactivity; 86 long default_inactivity;
87 int index;
87}; 88};
88 89
89/* should we use PARDEVICE_MAX here? */ 90/* should we use PARDEVICE_MAX here? */
90static struct device *devices[PARPORT_MAX]; 91static struct device *devices[PARPORT_MAX];
91 92
93static DEFINE_IDA(ida_index);
94
92/* pp_struct.flags bitfields */ 95/* pp_struct.flags bitfields */
93#define PP_CLAIMED (1<<0) 96#define PP_CLAIMED (1<<0)
94#define PP_EXCL (1<<1) 97#define PP_EXCL (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
290 struct pardevice *pdev = NULL; 293 struct pardevice *pdev = NULL;
291 char *name; 294 char *name;
292 struct pardev_cb ppdev_cb; 295 struct pardev_cb ppdev_cb;
293 int rc = 0; 296 int rc = 0, index;
294 297
295 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 298 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
296 if (name == NULL) 299 if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
303 goto err; 306 goto err;
304 } 307 }
305 308
309 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
306 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 310 memset(&ppdev_cb, 0, sizeof(ppdev_cb));
307 ppdev_cb.irq_func = pp_irq; 311 ppdev_cb.irq_func = pp_irq;
308 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; 312 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
309 ppdev_cb.private = pp; 313 ppdev_cb.private = pp;
310 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 314 pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
311 parport_put_port(port); 315 parport_put_port(port);
312 316
313 if (!pdev) { 317 if (!pdev) {
314 pr_warn("%s: failed to register device!\n", name); 318 pr_warn("%s: failed to register device!\n", name);
315 rc = -ENXIO; 319 rc = -ENXIO;
320 ida_simple_remove(&ida_index, index);
316 goto err; 321 goto err;
317 } 322 }
318 323
319 pp->pdev = pdev; 324 pp->pdev = pdev;
325 pp->index = index;
320 dev_dbg(&pdev->dev, "registered pardevice\n"); 326 dev_dbg(&pdev->dev, "registered pardevice\n");
321err: 327err:
322 kfree(name); 328 kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
755 761
756 if (pp->pdev) { 762 if (pp->pdev) {
757 parport_unregister_device(pp->pdev); 763 parport_unregister_device(pp->pdev);
764 ida_simple_remove(&ida_index, pp->index);
758 pp->pdev = NULL; 765 pp->pdev = NULL;
759 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 766 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
760 } 767 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e9b7e0b3cabe..87fe111d0be6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
2202 2202
2203 vdev->config->reset(vdev); 2203 vdev->config->reset(vdev);
2204 2204
2205 virtqueue_disable_cb(portdev->c_ivq); 2205 if (use_multiport(portdev))
2206 virtqueue_disable_cb(portdev->c_ivq);
2206 cancel_work_sync(&portdev->control_work); 2207 cancel_work_sync(&portdev->control_work);
2207 cancel_work_sync(&portdev->config_work); 2208 cancel_work_sync(&portdev->config_work);
2208 /* 2209 /*
2209 * Once more: if control_work_handler() was running, it would 2210 * Once more: if control_work_handler() was running, it would
2210 * enable the cb as the last step. 2211 * enable the cb as the last step.
2211 */ 2212 */
2212 virtqueue_disable_cb(portdev->c_ivq); 2213 if (use_multiport(portdev))
2214 virtqueue_disable_cb(portdev->c_ivq);
2213 remove_controlq_data(portdev); 2215 remove_controlq_data(portdev);
2214 2216
2215 list_for_each_entry(port, &portdev->ports, list) { 2217 list_for_each_entry(port, &portdev->ports, list) {
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index ab609a76706f..cf9449b3dbd9 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
429 { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 } 429 { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
430}; 430};
431 431
432static const struct clk_div_table pll_divq_table[] = {
433 { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
434 { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
435 { 14, 14 }, { 15, 15 },
436 { 0 }
437};
438
432static const struct clk_div_table pll_divr_table[] = { 439static const struct clk_div_table pll_divr_table[] = {
433 { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 } 440 { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
434}; 441};
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
496 503
497#define MAX_PLL_DIV 3 504#define MAX_PLL_DIV 3
498static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = { 505static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = {
499 { 16, 2, 0, pll_divp_table }, 506 { 16, 2, 0, pll_divp_table },
500 { 24, 4, CLK_DIVIDER_ONE_BASED, NULL }, 507 { 24, 4, 0, pll_divq_table },
501 { 28, 3, 0, pll_divr_table }, 508 { 28, 3, 0, pll_divr_table },
502}; 509};
503 510
504struct stm32f4_pll_data { 511struct stm32f4_pll_data {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe217d1..67201f67a14a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2502 2502
2503 clk->core = hw->core; 2503 clk->core = hw->core;
2504 clk->dev_id = dev_id; 2504 clk->dev_id = dev_id;
2505 clk->con_id = con_id; 2505 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
2506 clk->max_rate = ULONG_MAX; 2506 clk->max_rate = ULONG_MAX;
2507 2507
2508 clk_prepare_lock(); 2508 clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
2518 hlist_del(&clk->clks_node); 2518 hlist_del(&clk->clks_node);
2519 clk_prepare_unlock(); 2519 clk_prepare_unlock();
2520 2520
2521 kfree_const(clk->con_id);
2521 kfree(clk); 2522 kfree(clk);
2522} 2523}
2523 2524
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 924f560dcf80..00d4150e33c3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; 127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; 128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
129 129
130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; 130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
131 131
132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; 132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; 133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
450 return; 450 return;
451 } 451 }
452 452
453 /*
454 * Make uart_pll_clk a child of the gpll, as all other sources are
455 * not that usable / stable.
456 */
457 writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
458 reg_base + RK2928_CLKSEL_CON(13));
459
453 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); 460 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
454 if (IS_ERR(ctx)) { 461 if (IS_ERR(ctx)) {
455 pr_err("%s: rockchip clk init failed\n", __func__); 462 pr_err("%s: rockchip clk init failed\n", __func__);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 695bbf9ef428..1c2357301017 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -1,6 +1,7 @@
1config SUNXI_CCU 1config SUNXI_CCU
2 bool "Clock support for Allwinner SoCs" 2 bool "Clock support for Allwinner SoCs"
3 depends on ARCH_SUNXI || COMPILE_TEST 3 depends on ARCH_SUNXI || COMPILE_TEST
4 select RESET_CONTROLLER
4 default ARCH_SUNXI 5 default ARCH_SUNXI
5 6
6if SUNXI_CCU 7if SUNXI_CCU
@@ -80,6 +81,7 @@ config SUN6I_A31_CCU
80 select SUNXI_CCU_DIV 81 select SUNXI_CCU_DIV
81 select SUNXI_CCU_NK 82 select SUNXI_CCU_NK
82 select SUNXI_CCU_NKM 83 select SUNXI_CCU_NKM
84 select SUNXI_CCU_NKMP
83 select SUNXI_CCU_NM 85 select SUNXI_CCU_NM
84 select SUNXI_CCU_MP 86 select SUNXI_CCU_MP
85 select SUNXI_CCU_PHASE 87 select SUNXI_CCU_PHASE
@@ -134,6 +136,7 @@ config SUN8I_V3S_CCU
134config SUN9I_A80_CCU 136config SUN9I_A80_CCU
135 bool "Support for the Allwinner A80 CCU" 137 bool "Support for the Allwinner A80 CCU"
136 select SUNXI_CCU_DIV 138 select SUNXI_CCU_DIV
139 select SUNXI_CCU_MULT
137 select SUNXI_CCU_GATE 140 select SUNXI_CCU_GATE
138 select SUNXI_CCU_NKMP 141 select SUNXI_CCU_NKMP
139 select SUNXI_CCU_NM 142 select SUNXI_CCU_NM
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index e3c084cc6da5..f54114c607df 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); 566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
567 567
568/* Fixed Factor clocks */ 568/* Fixed Factor clocks */
569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); 569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
570 570
571/* We hardcode the divider to 4 for now */ 571/* We hardcode the divider to 4 for now */
572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", 572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4c9a920ff4ab..89e68d29bf45 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
608 0x150, 0, 4, 24, 2, BIT(31), 608 0x150, 0, 4, 24, 2, BIT(31),
609 CLK_SET_RATE_PARENT); 609 CLK_SET_RATE_PARENT);
610 610
611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); 611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
612 612
613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); 613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
614 614
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index a7b3c08ed0e2..2c69b631967a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
752 .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets), 752 .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets),
753}; 753};
754 754
755static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
756 .common = &pll_cpux_clk.common,
757 /* copy from pll_cpux_clk */
758 .enable = BIT(31),
759 .lock = BIT(28),
760};
761
755static struct ccu_mux_nb sun8i_a33_cpu_nb = { 762static struct ccu_mux_nb sun8i_a33_cpu_nb = {
756 .common = &cpux_clk.common, 763 .common = &cpux_clk.common,
757 .cm = &cpux_clk.mux, 764 .cm = &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
783 790
784 sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc); 791 sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
785 792
793 /* Gate then ungate PLL CPU after any rate changes */
794 ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
795
796 /* Reparent CPU during PLL CPU rate changes */
786 ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, 797 ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
787 &sun8i_a33_cpu_nb); 798 &sun8i_a33_cpu_nb);
788} 799}
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 8a47bafd7890..9d8724715a43 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -14,11 +14,13 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <linux/clk.h>
17#include <linux/clk-provider.h> 18#include <linux/clk-provider.h>
18#include <linux/iopoll.h> 19#include <linux/iopoll.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20 21
21#include "ccu_common.h" 22#include "ccu_common.h"
23#include "ccu_gate.h"
22#include "ccu_reset.h" 24#include "ccu_reset.h"
23 25
24static DEFINE_SPINLOCK(ccu_lock); 26static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
39 WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000)); 41 WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
40} 42}
41 43
44/*
45 * This clock notifier is called when the frequency of a PLL clock is
46 * changed. In common PLL designs, changes to the dividers take effect
47 * almost immediately, while changes to the multipliers (implemented
48 * as dividers in the feedback loop) take a few cycles to work into
 49 * the feedback loop for the PLL to stabilize.
50 *
51 * Sometimes when the PLL clock rate is changed, the decrease in the
52 * divider is too much for the decrease in the multiplier to catch up.
53 * The PLL clock rate will spike, and in some cases, might lock up
54 * completely.
55 *
56 * This notifier callback will gate and then ungate the clock,
57 * effectively resetting it, so it proceeds to work. Care must be
58 * taken to reparent consumers to other temporary clocks during the
59 * rate change, and that this notifier callback must be the first
60 * to be registered.
61 */
62static int ccu_pll_notifier_cb(struct notifier_block *nb,
63 unsigned long event, void *data)
64{
65 struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
66 int ret = 0;
67
68 if (event != POST_RATE_CHANGE)
69 goto out;
70
71 ccu_gate_helper_disable(pll->common, pll->enable);
72
73 ret = ccu_gate_helper_enable(pll->common, pll->enable);
74 if (ret)
75 goto out;
76
77 ccu_helper_wait_for_lock(pll->common, pll->lock);
78
79out:
80 return notifier_from_errno(ret);
81}
82
83int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
84{
85 pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
86
87 return clk_notifier_register(pll_nb->common->hw.clk,
88 &pll_nb->clk_nb);
89}
90
42int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, 91int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
43 const struct sunxi_ccu_desc *desc) 92 const struct sunxi_ccu_desc *desc)
44{ 93{
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 73d81dc58fc5..d6fdd7a789aa 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
83 83
84void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock); 84void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
85 85
86struct ccu_pll_nb {
87 struct notifier_block clk_nb;
88 struct ccu_common *common;
89
90 u32 enable;
91 u32 lock;
92};
93
94#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
95
96int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
97
86int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, 98int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
87 const struct sunxi_ccu_desc *desc); 99 const struct sunxi_ccu_desc *desc);
88 100
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 22c2ca7a2a22..b583f186a804 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
85 unsigned int m, p; 85 unsigned int m, p;
86 u32 reg; 86 u32 reg;
87 87
88 /* Adjust parent_rate according to pre-dividers */
89 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
90 -1, &parent_rate);
91
88 reg = readl(cmp->common.base + cmp->common.reg); 92 reg = readl(cmp->common.base + cmp->common.reg);
89 93
90 m = reg >> cmp->m.shift; 94 m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
117 unsigned int m, p; 121 unsigned int m, p;
118 u32 reg; 122 u32 reg;
119 123
124 /* Adjust parent_rate according to pre-dividers */
125 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
126 -1, &parent_rate);
127
120 max_m = cmp->m.max ?: 1 << cmp->m.width; 128 max_m = cmp->m.max ?: 1 << cmp->m.width;
121 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); 129 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
122 130
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index a2b40a000157..488055ed944f 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
107 p = reg >> nkmp->p.shift; 107 p = reg >> nkmp->p.shift;
108 p &= (1 << nkmp->p.width) - 1; 108 p &= (1 << nkmp->p.width) - 1;
109 109
110 return parent_rate * n * k >> p / m; 110 return (parent_rate * n * k >> p) / m;
111} 111}
112 112
113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, 113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
index 8c30fec86094..eb89b502acbd 100644
--- a/drivers/clocksource/clkevt-probe.c
+++ b/drivers/clocksource/clkevt-probe.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/clockchip.h> 20#include <linux/clockchips.h>
21 21
22extern struct of_device_id __clkevt_of_table[]; 22extern struct of_device_id __clkevt_of_table[];
23 23
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8ff617d449d..0e3f6496524d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
918 .release = cpufreq_sysfs_release, 918 .release = cpufreq_sysfs_release,
919}; 919};
920 920
921static int add_cpu_dev_symlink(struct cpufreq_policy *policy, 921static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
922 struct device *dev)
923{ 922{
923 struct device *dev = get_cpu_device(cpu);
924
925 if (!dev)
926 return;
927
928 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
929 return;
930
924 dev_dbg(dev, "%s: Adding symlink\n", __func__); 931 dev_dbg(dev, "%s: Adding symlink\n", __func__);
925 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 932 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
933 dev_err(dev, "cpufreq symlink creation failed\n");
926} 934}
927 935
928static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, 936static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1180,10 +1188,13 @@ static int cpufreq_online(unsigned int cpu)
1180 policy->user_policy.min = policy->min; 1188 policy->user_policy.min = policy->min;
1181 policy->user_policy.max = policy->max; 1189 policy->user_policy.max = policy->max;
1182 1190
1183 write_lock_irqsave(&cpufreq_driver_lock, flags); 1191 for_each_cpu(j, policy->related_cpus) {
1184 for_each_cpu(j, policy->related_cpus)
1185 per_cpu(cpufreq_cpu_data, j) = policy; 1192 per_cpu(cpufreq_cpu_data, j) = policy;
1186 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1193 add_cpu_dev_symlink(policy, j);
1194 }
1195 } else {
1196 policy->min = policy->user_policy.min;
1197 policy->max = policy->user_policy.max;
1187 } 1198 }
1188 1199
1189 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1200 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1272,13 +1283,15 @@ out_exit_policy:
1272 1283
1273 if (cpufreq_driver->exit) 1284 if (cpufreq_driver->exit)
1274 cpufreq_driver->exit(policy); 1285 cpufreq_driver->exit(policy);
1286
1287 for_each_cpu(j, policy->real_cpus)
1288 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1289
1275out_free_policy: 1290out_free_policy:
1276 cpufreq_policy_free(policy); 1291 cpufreq_policy_free(policy);
1277 return ret; 1292 return ret;
1278} 1293}
1279 1294
1280static int cpufreq_offline(unsigned int cpu);
1281
1282/** 1295/**
1283 * cpufreq_add_dev - the cpufreq interface for a CPU device. 1296 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1284 * @dev: CPU device. 1297 * @dev: CPU device.
@@ -1300,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1300 1313
1301 /* Create sysfs link on CPU registration */ 1314 /* Create sysfs link on CPU registration */
1302 policy = per_cpu(cpufreq_cpu_data, cpu); 1315 policy = per_cpu(cpufreq_cpu_data, cpu);
1303 if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1316 if (policy)
1304 return 0; 1317 add_cpu_dev_symlink(policy, cpu);
1305 1318
1306 ret = add_cpu_dev_symlink(policy, dev); 1319 return 0;
1307 if (ret) {
1308 cpumask_clear_cpu(cpu, policy->real_cpus);
1309 cpufreq_offline(cpu);
1310 }
1311
1312 return ret;
1313} 1320}
1314 1321
1315static int cpufreq_offline(unsigned int cpu) 1322static int cpufreq_offline(unsigned int cpu)
@@ -2391,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2391 *********************************************************************/ 2398 *********************************************************************/
2392static enum cpuhp_state hp_online; 2399static enum cpuhp_state hp_online;
2393 2400
2401static int cpuhp_cpufreq_online(unsigned int cpu)
2402{
2403 cpufreq_online(cpu);
2404
2405 return 0;
2406}
2407
2408static int cpuhp_cpufreq_offline(unsigned int cpu)
2409{
2410 cpufreq_offline(cpu);
2411
2412 return 0;
2413}
2414
2394/** 2415/**
2395 * cpufreq_register_driver - register a CPU Frequency driver 2416 * cpufreq_register_driver - register a CPU Frequency driver
2396 * @driver_data: A struct cpufreq_driver containing the values 2417 * @driver_data: A struct cpufreq_driver containing the values
@@ -2453,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2453 } 2474 }
2454 2475
2455 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", 2476 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2456 cpufreq_online, 2477 cpuhp_cpufreq_online,
2457 cpufreq_offline); 2478 cpuhp_cpufreq_offline);
2458 if (ret < 0) 2479 if (ret < 0)
2459 goto err_if_unreg; 2480 goto err_if_unreg;
2460 hp_online = ret; 2481 hp_online = ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 08e134ffba68..283491f742d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -364,9 +364,7 @@ static bool driver_registered __read_mostly;
364static bool acpi_ppc; 364static bool acpi_ppc;
365#endif 365#endif
366 366
367static struct perf_limits performance_limits; 367static struct perf_limits global;
368static struct perf_limits powersave_limits;
369static struct perf_limits *limits;
370 368
371static void intel_pstate_init_limits(struct perf_limits *limits) 369static void intel_pstate_init_limits(struct perf_limits *limits)
372{ 370{
@@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
377 limits->max_sysfs_pct = 100; 375 limits->max_sysfs_pct = 100;
378} 376}
379 377
380static void intel_pstate_set_performance_limits(struct perf_limits *limits)
381{
382 intel_pstate_init_limits(limits);
383 limits->min_perf_pct = 100;
384 limits->min_perf = int_ext_tofp(1);
385 limits->min_sysfs_pct = 100;
386}
387
388static DEFINE_MUTEX(intel_pstate_driver_lock); 378static DEFINE_MUTEX(intel_pstate_driver_lock);
389static DEFINE_MUTEX(intel_pstate_limits_lock); 379static DEFINE_MUTEX(intel_pstate_limits_lock);
390 380
@@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
507 * correct max turbo frequency based on the turbo state. 497 * correct max turbo frequency based on the turbo state.
508 * Also need to convert to MHz as _PSS freq is in MHz. 498 * Also need to convert to MHz as _PSS freq is in MHz.
509 */ 499 */
510 if (!limits->turbo_disabled) 500 if (!global.turbo_disabled)
511 cpu->acpi_perf_data.states[0].core_frequency = 501 cpu->acpi_perf_data.states[0].core_frequency =
512 policy->cpuinfo.max_freq / 1000; 502 policy->cpuinfo.max_freq / 1000;
513 cpu->valid_pss_table = true; 503 cpu->valid_pss_table = true;
@@ -626,7 +616,7 @@ static inline void update_turbo_state(void)
626 616
627 cpu = all_cpu_data[0]; 617 cpu = all_cpu_data[0];
628 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 618 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
629 limits->turbo_disabled = 619 global.turbo_disabled =
630 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 620 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
631 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 621 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
632} 622}
@@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
851static void intel_pstate_hwp_set(struct cpufreq_policy *policy) 841static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
852{ 842{
853 int min, hw_min, max, hw_max, cpu; 843 int min, hw_min, max, hw_max, cpu;
854 struct perf_limits *perf_limits = limits; 844 struct perf_limits *perf_limits = &global;
855 u64 value, cap; 845 u64 value, cap;
856 846
857 for_each_cpu(cpu, policy->cpus) { 847 for_each_cpu(cpu, policy->cpus) {
@@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
863 853
864 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 854 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
865 hw_min = HWP_LOWEST_PERF(cap); 855 hw_min = HWP_LOWEST_PERF(cap);
866 if (limits->no_turbo) 856 if (global.no_turbo)
867 hw_max = HWP_GUARANTEED_PERF(cap); 857 hw_max = HWP_GUARANTEED_PERF(cap);
868 else 858 else
869 hw_max = HWP_HIGHEST_PERF(cap); 859 hw_max = HWP_HIGHEST_PERF(cap);
870 860
871 min = fp_ext_toint(hw_max * perf_limits->min_perf); 861 max = fp_ext_toint(hw_max * perf_limits->max_perf);
862 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
863 min = max;
864 else
865 min = fp_ext_toint(hw_max * perf_limits->min_perf);
872 866
873 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 867 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
874 868
875 value &= ~HWP_MIN_PERF(~0L); 869 value &= ~HWP_MIN_PERF(~0L);
876 value |= HWP_MIN_PERF(min); 870 value |= HWP_MIN_PERF(min);
877 871
878 max = fp_ext_toint(hw_max * perf_limits->max_perf);
879 value &= ~HWP_MAX_PERF(~0L); 872 value &= ~HWP_MAX_PERF(~0L);
880 value |= HWP_MAX_PERF(max); 873 value |= HWP_MAX_PERF(max);
881 874
@@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
968} 961}
969 962
970static void intel_pstate_update_policies(void) 963static void intel_pstate_update_policies(void)
971 __releases(&intel_pstate_limits_lock)
972 __acquires(&intel_pstate_limits_lock)
973{ 964{
974 struct perf_limits *saved_limits = limits;
975 int cpu; 965 int cpu;
976 966
977 mutex_unlock(&intel_pstate_limits_lock);
978
979 for_each_possible_cpu(cpu) 967 for_each_possible_cpu(cpu)
980 cpufreq_update_policy(cpu); 968 cpufreq_update_policy(cpu);
981
982 mutex_lock(&intel_pstate_limits_lock);
983
984 limits = saved_limits;
985} 969}
986 970
987/************************** debugfs begin ************************/ 971/************************** debugfs begin ************************/
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
1060 static ssize_t show_##file_name \ 1044 static ssize_t show_##file_name \
1061 (struct kobject *kobj, struct attribute *attr, char *buf) \ 1045 (struct kobject *kobj, struct attribute *attr, char *buf) \
1062 { \ 1046 { \
1063 return sprintf(buf, "%u\n", limits->object); \ 1047 return sprintf(buf, "%u\n", global.object); \
1064 } 1048 }
1065 1049
1066static ssize_t intel_pstate_show_status(char *buf); 1050static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
1151 } 1135 }
1152 1136
1153 update_turbo_state(); 1137 update_turbo_state();
1154 if (limits->turbo_disabled) 1138 if (global.turbo_disabled)
1155 ret = sprintf(buf, "%u\n", limits->turbo_disabled); 1139 ret = sprintf(buf, "%u\n", global.turbo_disabled);
1156 else 1140 else
1157 ret = sprintf(buf, "%u\n", limits->no_turbo); 1141 ret = sprintf(buf, "%u\n", global.no_turbo);
1158 1142
1159 mutex_unlock(&intel_pstate_driver_lock); 1143 mutex_unlock(&intel_pstate_driver_lock);
1160 1144
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
1181 mutex_lock(&intel_pstate_limits_lock); 1165 mutex_lock(&intel_pstate_limits_lock);
1182 1166
1183 update_turbo_state(); 1167 update_turbo_state();
1184 if (limits->turbo_disabled) { 1168 if (global.turbo_disabled) {
1185 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1169 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
1186 mutex_unlock(&intel_pstate_limits_lock); 1170 mutex_unlock(&intel_pstate_limits_lock);
1187 mutex_unlock(&intel_pstate_driver_lock); 1171 mutex_unlock(&intel_pstate_driver_lock);
1188 return -EPERM; 1172 return -EPERM;
1189 } 1173 }
1190 1174
1191 limits->no_turbo = clamp_t(int, input, 0, 1); 1175 global.no_turbo = clamp_t(int, input, 0, 1);
1192
1193 intel_pstate_update_policies();
1194 1176
1195 mutex_unlock(&intel_pstate_limits_lock); 1177 mutex_unlock(&intel_pstate_limits_lock);
1196 1178
1179 intel_pstate_update_policies();
1180
1197 mutex_unlock(&intel_pstate_driver_lock); 1181 mutex_unlock(&intel_pstate_driver_lock);
1198 1182
1199 return count; 1183 return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1218 1202
1219 mutex_lock(&intel_pstate_limits_lock); 1203 mutex_lock(&intel_pstate_limits_lock);
1220 1204
1221 limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); 1205 global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
1222 limits->max_perf_pct = min(limits->max_policy_pct, 1206 global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
1223 limits->max_sysfs_pct); 1207 global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
1224 limits->max_perf_pct = max(limits->min_policy_pct, 1208 global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
1225 limits->max_perf_pct); 1209 global.max_perf = percent_ext_fp(global.max_perf_pct);
1226 limits->max_perf_pct = max(limits->min_perf_pct,
1227 limits->max_perf_pct);
1228 limits->max_perf = percent_ext_fp(limits->max_perf_pct);
1229
1230 intel_pstate_update_policies();
1231 1210
1232 mutex_unlock(&intel_pstate_limits_lock); 1211 mutex_unlock(&intel_pstate_limits_lock);
1233 1212
1213 intel_pstate_update_policies();
1214
1234 mutex_unlock(&intel_pstate_driver_lock); 1215 mutex_unlock(&intel_pstate_driver_lock);
1235 1216
1236 return count; 1217 return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
1255 1236
1256 mutex_lock(&intel_pstate_limits_lock); 1237 mutex_lock(&intel_pstate_limits_lock);
1257 1238
1258 limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); 1239 global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
1259 limits->min_perf_pct = max(limits->min_policy_pct, 1240 global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
1260 limits->min_sysfs_pct); 1241 global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
1261 limits->min_perf_pct = min(limits->max_policy_pct, 1242 global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
1262 limits->min_perf_pct); 1243 global.min_perf = percent_ext_fp(global.min_perf_pct);
1263 limits->min_perf_pct = min(limits->max_perf_pct,
1264 limits->min_perf_pct);
1265 limits->min_perf = percent_ext_fp(limits->min_perf_pct);
1266
1267 intel_pstate_update_policies();
1268 1244
1269 mutex_unlock(&intel_pstate_limits_lock); 1245 mutex_unlock(&intel_pstate_limits_lock);
1270 1246
1247 intel_pstate_update_policies();
1248
1271 mutex_unlock(&intel_pstate_driver_lock); 1249 mutex_unlock(&intel_pstate_driver_lock);
1272 1250
1273 return count; 1251 return count;
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1387 u32 vid; 1365 u32 vid;
1388 1366
1389 val = (u64)pstate << 8; 1367 val = (u64)pstate << 8;
1390 if (limits->no_turbo && !limits->turbo_disabled) 1368 if (global.no_turbo && !global.turbo_disabled)
1391 val |= (u64)1 << 32; 1369 val |= (u64)1 << 32;
1392 1370
1393 vid_fp = cpudata->vid.min + mul_fp( 1371 vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
1557 u64 val; 1535 u64 val;
1558 1536
1559 val = (u64)pstate << 8; 1537 val = (u64)pstate << 8;
1560 if (limits->no_turbo && !limits->turbo_disabled) 1538 if (global.no_turbo && !global.turbo_disabled)
1561 val |= (u64)1 << 32; 1539 val |= (u64)1 << 32;
1562 1540
1563 return val; 1541 return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
1683 int max_perf = cpu->pstate.turbo_pstate; 1661 int max_perf = cpu->pstate.turbo_pstate;
1684 int max_perf_adj; 1662 int max_perf_adj;
1685 int min_perf; 1663 int min_perf;
1686 struct perf_limits *perf_limits = limits; 1664 struct perf_limits *perf_limits = &global;
1687 1665
1688 if (limits->no_turbo || limits->turbo_disabled) 1666 if (global.no_turbo || global.turbo_disabled)
1689 max_perf = cpu->pstate.max_pstate; 1667 max_perf = cpu->pstate.max_pstate;
1690 1668
1691 if (per_cpu_limits) 1669 if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
1820 1798
1821 sample->busy_scaled = busy_frac * 100; 1799 sample->busy_scaled = busy_frac * 100;
1822 1800
1823 target = limits->no_turbo || limits->turbo_disabled ? 1801 target = global.no_turbo || global.turbo_disabled ?
1824 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1802 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1825 target += target >> 2; 1803 target += target >> 2;
1826 target = mul_fp(target, busy_frac); 1804 target = mul_fp(target, busy_frac);
@@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
2116static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2094static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2117{ 2095{
2118 struct cpudata *cpu; 2096 struct cpudata *cpu;
2119 struct perf_limits *perf_limits = NULL; 2097 struct perf_limits *perf_limits = &global;
2120 2098
2121 if (!policy->cpuinfo.max_freq) 2099 if (!policy->cpuinfo.max_freq)
2122 return -ENODEV; 2100 return -ENODEV;
@@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2139 2117
2140 mutex_lock(&intel_pstate_limits_lock); 2118 mutex_lock(&intel_pstate_limits_lock);
2141 2119
2142 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
2143 pr_debug("set performance\n");
2144 if (!perf_limits) {
2145 limits = &performance_limits;
2146 perf_limits = limits;
2147 }
2148 } else {
2149 pr_debug("set powersave\n");
2150 if (!perf_limits) {
2151 limits = &powersave_limits;
2152 perf_limits = limits;
2153 }
2154
2155 }
2156
2157 intel_pstate_update_perf_limits(policy, perf_limits); 2120 intel_pstate_update_perf_limits(policy, perf_limits);
2158 2121
2159 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2122 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2177static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 2140static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2178{ 2141{
2179 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2142 struct cpudata *cpu = all_cpu_data[policy->cpu];
2180 struct perf_limits *perf_limits;
2181
2182 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
2183 perf_limits = &performance_limits;
2184 else
2185 perf_limits = &powersave_limits;
2186 2143
2187 update_turbo_state(); 2144 update_turbo_state();
2188 policy->cpuinfo.max_freq = perf_limits->turbo_disabled || 2145 policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
2189 perf_limits->no_turbo ?
2190 cpu->pstate.max_freq : 2146 cpu->pstate.max_freq :
2191 cpu->pstate.turbo_freq; 2147 cpu->pstate.turbo_freq;
2192 2148
@@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2201 unsigned int max_freq, min_freq; 2157 unsigned int max_freq, min_freq;
2202 2158
2203 max_freq = policy->cpuinfo.max_freq * 2159 max_freq = policy->cpuinfo.max_freq *
2204 perf_limits->max_sysfs_pct / 100; 2160 global.max_sysfs_pct / 100;
2205 min_freq = policy->cpuinfo.max_freq * 2161 min_freq = policy->cpuinfo.max_freq *
2206 perf_limits->min_sysfs_pct / 100; 2162 global.min_sysfs_pct / 100;
2207 cpufreq_verify_within_limits(policy, min_freq, max_freq); 2163 cpufreq_verify_within_limits(policy, min_freq, max_freq);
2208 } 2164 }
2209 2165
@@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2255 /* cpuinfo and default policy values */ 2211 /* cpuinfo and default policy values */
2256 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 2212 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2257 update_turbo_state(); 2213 update_turbo_state();
2258 policy->cpuinfo.max_freq = limits->turbo_disabled ? 2214 policy->cpuinfo.max_freq = global.turbo_disabled ?
2259 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2215 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2260 policy->cpuinfo.max_freq *= cpu->pstate.scaling; 2216 policy->cpuinfo.max_freq *= cpu->pstate.scaling;
2261 2217
@@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2275 return ret; 2231 return ret;
2276 2232
2277 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 2233 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
2278 if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) 2234 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
2279 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 2235 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
2280 else 2236 else
2281 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2237 policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2301 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2257 struct cpudata *cpu = all_cpu_data[policy->cpu];
2302 2258
2303 update_turbo_state(); 2259 update_turbo_state();
2304 policy->cpuinfo.max_freq = limits->turbo_disabled ? 2260 policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
2305 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2261 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2306 2262
2307 cpufreq_verify_within_cpu_limits(policy); 2263 cpufreq_verify_within_cpu_limits(policy);
@@ -2309,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2309 return 0; 2265 return 0;
2310} 2266}
2311 2267
2312static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
2313 struct cpufreq_policy *policy,
2314 unsigned int target_freq)
2315{
2316 unsigned int max_freq;
2317
2318 update_turbo_state();
2319
2320 max_freq = limits->no_turbo || limits->turbo_disabled ?
2321 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2322 policy->cpuinfo.max_freq = max_freq;
2323 if (policy->max > max_freq)
2324 policy->max = max_freq;
2325
2326 if (target_freq > max_freq)
2327 target_freq = max_freq;
2328
2329 return target_freq;
2330}
2331
2332static int intel_cpufreq_target(struct cpufreq_policy *policy, 2268static int intel_cpufreq_target(struct cpufreq_policy *policy,
2333 unsigned int target_freq, 2269 unsigned int target_freq,
2334 unsigned int relation) 2270 unsigned int relation)
@@ -2337,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
2337 struct cpufreq_freqs freqs; 2273 struct cpufreq_freqs freqs;
2338 int target_pstate; 2274 int target_pstate;
2339 2275
2276 update_turbo_state();
2277
2340 freqs.old = policy->cur; 2278 freqs.old = policy->cur;
2341 freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq); 2279 freqs.new = target_freq;
2342 2280
2343 cpufreq_freq_transition_begin(policy, &freqs); 2281 cpufreq_freq_transition_begin(policy, &freqs);
2344 switch (relation) { 2282 switch (relation) {
@@ -2370,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2370 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2308 struct cpudata *cpu = all_cpu_data[policy->cpu];
2371 int target_pstate; 2309 int target_pstate;
2372 2310
2373 target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); 2311 update_turbo_state();
2312
2374 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2313 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2375 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2314 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2376 intel_pstate_update_pstate(cpu, target_pstate); 2315 intel_pstate_update_pstate(cpu, target_pstate);
@@ -2425,13 +2364,7 @@ static int intel_pstate_register_driver(void)
2425{ 2364{
2426 int ret; 2365 int ret;
2427 2366
2428 intel_pstate_init_limits(&powersave_limits); 2367 intel_pstate_init_limits(&global);
2429 intel_pstate_set_performance_limits(&performance_limits);
2430 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
2431 intel_pstate_driver == &intel_pstate)
2432 limits = &performance_limits;
2433 else
2434 limits = &powersave_limits;
2435 2368
2436 ret = cpufreq_register_driver(intel_pstate_driver); 2369 ret = cpufreq_register_driver(intel_pstate_driver);
2437 if (ret) { 2370 if (ret) {
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 370593006f5f..cda8f62d555b 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void)
175 drv->state_count += 1; 175 drv->state_count += 1;
176 } 176 }
177 177
178 /*
179 * On the PowerNV platform cpu_present may be less than cpu_possible in
180 * cases when firmware detects the CPU, but it is not available to the
 181 * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hot-pluggable at
182 * run time and hence cpu_devices are not created for those CPUs by the
183 * generic topology_init().
184 *
185 * drv->cpumask defaults to cpu_possible_mask in
186 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
187 * cpu_devices are not created for CPUs in cpu_possible_mask that
188 * cannot be hot-added later at run time.
189 *
190 * Trying cpuidle_register_device() on a CPU without a cpu_device is
191 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
192 */
193
194 drv->cpumask = (struct cpumask *)cpu_present_mask;
195
178 return 0; 196 return 0;
179} 197}
180 198
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index c5adc8c9ac43..ae948b1da93a 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
616 int error; 616 int error;
617 617
618 /*
619 * Return if cpu_device is not setup for this CPU.
620 *
621 * This could happen if the arch did not set up cpu_device
622 * since this CPU is not in cpu_present mask and the
623 * driver did not send a correct CPU mask during registration.
624 * Without this check we would end up passing bogus
625 * value for &cpu_dev->kobj in kobject_init_and_add()
626 */
627 if (!cpu_dev)
628 return -ENODEV;
629
618 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); 630 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
619 if (!kdev) 631 if (!kdev)
620 return -ENOMEM; 632 return -ENOMEM;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 32100c4851dd..49cbdcba7883 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
506 ctx->dev = caam_jr_alloc(); 506 ctx->dev = caam_jr_alloc();
507 507
508 if (IS_ERR(ctx->dev)) { 508 if (IS_ERR(ctx->dev)) {
509 dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n"); 509 pr_err("Job Ring Device allocation for transform failed\n");
510 return PTR_ERR(ctx->dev); 510 return PTR_ERR(ctx->dev);
511 } 511 }
512 512
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fef39f9f41ee..5d7f73d60515 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
281 /* Try to run it through DECO0 */ 281 /* Try to run it through DECO0 */
282 ret = run_descriptor_deco0(ctrldev, desc, &status); 282 ret = run_descriptor_deco0(ctrldev, desc, &status);
283 283
284 if (ret || status) { 284 if (ret ||
285 (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
285 dev_err(ctrldev, 286 dev_err(ctrldev,
286 "Failed to deinstantiate RNG4 SH%d\n", 287 "Failed to deinstantiate RNG4 SH%d\n",
287 sh_idx); 288 sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
301 struct device *ctrldev; 302 struct device *ctrldev;
302 struct caam_drv_private *ctrlpriv; 303 struct caam_drv_private *ctrlpriv;
303 struct caam_ctrl __iomem *ctrl; 304 struct caam_ctrl __iomem *ctrl;
304 int ring;
305 305
306 ctrldev = &pdev->dev; 306 ctrldev = &pdev->dev;
307 ctrlpriv = dev_get_drvdata(ctrldev); 307 ctrlpriv = dev_get_drvdata(ctrldev);
308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
309 309
310 /* Remove platform devices for JobRs */ 310 /* Remove platform devices under the crypto node */
311 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) 311 of_platform_depopulate(ctrldev);
312 of_device_unregister(ctrlpriv->jrpdev[ring]);
313 312
314 /* De-initialize RNG state handles initialized by this driver. */ 313 /* De-initialize RNG state handles initialized by this driver. */
315 if (ctrlpriv->rng4_sh_init) 314 if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
418DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); 417DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
419#endif 418#endif
420 419
420static const struct of_device_id caam_match[] = {
421 {
422 .compatible = "fsl,sec-v4.0",
423 },
424 {
425 .compatible = "fsl,sec4.0",
426 },
427 {},
428};
429MODULE_DEVICE_TABLE(of, caam_match);
430
421/* Probe routine for CAAM top (controller) level */ 431/* Probe routine for CAAM top (controller) level */
422static int caam_probe(struct platform_device *pdev) 432static int caam_probe(struct platform_device *pdev)
423{ 433{
424 int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 434 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
425 u64 caam_id; 435 u64 caam_id;
426 struct device *dev; 436 struct device *dev;
427 struct device_node *nprop, *np; 437 struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
597 goto iounmap_ctrl; 607 goto iounmap_ctrl;
598 } 608 }
599 609
600 /* 610 ret = of_platform_populate(nprop, caam_match, NULL, dev);
601 * Detect and enable JobRs 611 if (ret) {
602 * First, find out how many ring spec'ed, allocate references 612 dev_err(dev, "JR platform devices creation error\n");
603 * for all, then go probe each one.
604 */
605 rspec = 0;
606 for_each_available_child_of_node(nprop, np)
607 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
608 of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
609 rspec++;
610
611 ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
612 sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
613 if (ctrlpriv->jrpdev == NULL) {
614 ret = -ENOMEM;
615 goto iounmap_ctrl; 613 goto iounmap_ctrl;
616 } 614 }
617 615
618 ring = 0; 616 ring = 0;
619 ridx = 0;
620 ctrlpriv->total_jobrs = 0;
621 for_each_available_child_of_node(nprop, np) 617 for_each_available_child_of_node(nprop, np)
622 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || 618 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
623 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { 619 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
624 ctrlpriv->jrpdev[ring] =
625 of_platform_device_create(np, NULL, dev);
626 if (!ctrlpriv->jrpdev[ring]) {
627 pr_warn("JR physical index %d: Platform device creation error\n",
628 ridx);
629 ridx++;
630 continue;
631 }
632 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) 620 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
633 ((__force uint8_t *)ctrl + 621 ((__force uint8_t *)ctrl +
634 (ridx + JR_BLOCK_NUMBER) * 622 (ring + JR_BLOCK_NUMBER) *
635 BLOCK_OFFSET 623 BLOCK_OFFSET
636 ); 624 );
637 ctrlpriv->total_jobrs++; 625 ctrlpriv->total_jobrs++;
638 ring++; 626 ring++;
639 ridx++; 627 }
640 }
641 628
642 /* Check to see if QI present. If so, enable */ 629 /* Check to see if QI present. If so, enable */
643 ctrlpriv->qi_present = 630 ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
847 return ret; 834 return ret;
848} 835}
849 836
850static struct of_device_id caam_match[] = {
851 {
852 .compatible = "fsl,sec-v4.0",
853 },
854 {
855 .compatible = "fsl,sec4.0",
856 },
857 {},
858};
859MODULE_DEVICE_TABLE(of, caam_match);
860
861static struct platform_driver caam_driver = { 837static struct platform_driver caam_driver = {
862 .driver = { 838 .driver = {
863 .name = "caam", 839 .name = "caam",
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..dbed8baeebe5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
66struct caam_drv_private { 66struct caam_drv_private {
67 67
68 struct device *dev; 68 struct device *dev;
69 struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
70 struct platform_device *pdev; 69 struct platform_device *pdev;
71 70
72 /* Physical-presence section */ 71 /* Physical-presence section */
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 41cc853f8569..fc08b4ed69d9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = {
1015 1015
1016const struct ccp_vdata ccpv5b = { 1016const struct ccp_vdata ccpv5b = {
1017 .version = CCP_VERSION(5, 0), 1017 .version = CCP_VERSION(5, 0),
1018 .dma_chan_attr = DMA_PRIVATE,
1018 .setup = ccp5other_config, 1019 .setup = ccp5other_config,
1019 .perform = &ccp5_actions, 1020 .perform = &ccp5_actions,
1020 .bar = 2, 1021 .bar = 2,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 511ab042b5e7..92d1c6959f08 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
283 */ 283 */
284int ccp_enqueue_cmd(struct ccp_cmd *cmd) 284int ccp_enqueue_cmd(struct ccp_cmd *cmd)
285{ 285{
286 struct ccp_device *ccp = ccp_get_device(); 286 struct ccp_device *ccp;
287 unsigned long flags; 287 unsigned long flags;
288 unsigned int i; 288 unsigned int i;
289 int ret; 289 int ret;
290 290
291 /* Some commands might need to be sent to a specific device */
292 ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
293
291 if (!ccp) 294 if (!ccp)
292 return -ENODEV; 295 return -ENODEV;
293 296
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 2b5c01fade05..aa36f3f81860 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -179,6 +179,10 @@
179 179
180/* ------------------------ General CCP Defines ------------------------ */ 180/* ------------------------ General CCP Defines ------------------------ */
181 181
182#define CCP_DMA_DFLT 0x0
183#define CCP_DMA_PRIV 0x1
184#define CCP_DMA_PUB 0x2
185
182#define CCP_DMAPOOL_MAX_SIZE 64 186#define CCP_DMAPOOL_MAX_SIZE 64
183#define CCP_DMAPOOL_ALIGN BIT(5) 187#define CCP_DMAPOOL_ALIGN BIT(5)
184 188
@@ -636,6 +640,7 @@ struct ccp_actions {
636/* Structure to hold CCP version-specific values */ 640/* Structure to hold CCP version-specific values */
637struct ccp_vdata { 641struct ccp_vdata {
638 const unsigned int version; 642 const unsigned int version;
643 const unsigned int dma_chan_attr;
639 void (*setup)(struct ccp_device *); 644 void (*setup)(struct ccp_device *);
640 const struct ccp_actions *perform; 645 const struct ccp_actions *perform;
641 const unsigned int bar; 646 const unsigned int bar;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278f4019..e00be01fbf5a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -25,6 +26,37 @@
25 (mask == 0) ? 64 : fls64(mask); \ 26 (mask == 0) ? 64 : fls64(mask); \
26}) 27})
27 28
29/* The CCP as a DMA provider can be configured for public or private
30 * channels. Default is specified in the vdata for the device (PCI ID).
31 * This module parameter will override for all channels on all devices:
32 * dma_chan_attr = 0x2 to force all channels public
33 * = 0x1 to force all channels private
34 * = 0x0 to defer to the vdata setting
35 * = any other value: warning, revert to 0x0
36 */
37static unsigned int dma_chan_attr = CCP_DMA_DFLT;
38module_param(dma_chan_attr, uint, 0444);
39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
40
41unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
42{
43 switch (dma_chan_attr) {
44 case CCP_DMA_DFLT:
45 return ccp->vdata->dma_chan_attr;
46
47 case CCP_DMA_PRIV:
48 return DMA_PRIVATE;
49
50 case CCP_DMA_PUB:
51 return 0;
52
53 default:
54 dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
55 dma_chan_attr);
56 return ccp->vdata->dma_chan_attr;
57 }
58}
59
28static void ccp_free_cmd_resources(struct ccp_device *ccp, 60static void ccp_free_cmd_resources(struct ccp_device *ccp,
29 struct list_head *list) 61 struct list_head *list)
30{ 62{
@@ -390,6 +422,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
390 goto err; 422 goto err;
391 423
392 ccp_cmd = &cmd->ccp_cmd; 424 ccp_cmd = &cmd->ccp_cmd;
425 ccp_cmd->ccp = chan->ccp;
393 ccp_pt = &ccp_cmd->u.passthru_nomap; 426 ccp_pt = &ccp_cmd->u.passthru_nomap;
394 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; 427 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
395 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; 428 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
@@ -674,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
674 dma_cap_set(DMA_SG, dma_dev->cap_mask); 707 dma_cap_set(DMA_SG, dma_dev->cap_mask);
675 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 708 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
676 709
710 /* The DMA channels for this device can be set to public or private,
711 * and overridden by the module parameter dma_chan_attr.
712 * Default: according to the value in vdata (dma_chan_attr=0)
713 * dma_chan_attr=0x1: all channels private (override vdata)
714 * dma_chan_attr=0x2: all channels public (override vdata)
715 */
716 if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
717 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
718
677 INIT_LIST_HEAD(&dma_dev->channels); 719 INIT_LIST_HEAD(&dma_dev->channels);
678 for (i = 0; i < ccp->cmd_q_count; i++) { 720 for (i = 0; i < ccp->cmd_q_count; i++) {
679 chan = ccp->ccp_dma_chan + i; 721 chan = ccp->ccp_dma_chan + i;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b14eea..9e95bf94eb13 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
2 tristate "DAX: direct access to differentiated memory" 2 tristate "DAX: direct access to differentiated memory"
3 default m if NVDIMM_DAX 3 default m if NVDIMM_DAX
4 depends on TRANSPARENT_HUGEPAGE 4 depends on TRANSPARENT_HUGEPAGE
5 select SRCU
5 help 6 help
6 Support raw access to differentiated (persistence, bandwidth, 7 Support raw access to differentiated (persistence, bandwidth,
7 latency...) memory via an mmap(2) capable character 8 latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 80c6db279ae1..806f180c80d8 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -25,6 +25,7 @@
25#include "dax.h" 25#include "dax.h"
26 26
27static dev_t dax_devt; 27static dev_t dax_devt;
28DEFINE_STATIC_SRCU(dax_srcu);
28static struct class *dax_class; 29static struct class *dax_class;
29static DEFINE_IDA(dax_minor_ida); 30static DEFINE_IDA(dax_minor_ida);
30static int nr_dax = CONFIG_NR_DEV_DAX; 31static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
60 * @region - parent region 61 * @region - parent region
61 * @dev - device backing the character device 62 * @dev - device backing the character device
62 * @cdev - core chardev data 63 * @cdev - core chardev data
63 * @alive - !alive + rcu grace period == no new mappings can be established 64 * @alive - !alive + srcu grace period == no new mappings can be established
64 * @id - child id in the region 65 * @id - child id in the region
65 * @num_resources - number of physical address extents in this device 66 * @num_resources - number of physical address extents in this device
66 * @res - array of physical address ranges 67 * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
569static int dax_dev_huge_fault(struct vm_fault *vmf, 570static int dax_dev_huge_fault(struct vm_fault *vmf,
570 enum page_entry_size pe_size) 571 enum page_entry_size pe_size)
571{ 572{
572 int rc; 573 int rc, id;
573 struct file *filp = vmf->vma->vm_file; 574 struct file *filp = vmf->vma->vm_file;
574 struct dax_dev *dax_dev = filp->private_data; 575 struct dax_dev *dax_dev = filp->private_data;
575 576
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
578 ? "write" : "read", 579 ? "write" : "read",
579 vmf->vma->vm_start, vmf->vma->vm_end); 580 vmf->vma->vm_start, vmf->vma->vm_end);
580 581
581 rcu_read_lock(); 582 id = srcu_read_lock(&dax_srcu);
582 switch (pe_size) { 583 switch (pe_size) {
583 case PE_SIZE_PTE: 584 case PE_SIZE_PTE:
584 rc = __dax_dev_pte_fault(dax_dev, vmf); 585 rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
592 default: 593 default:
593 return VM_FAULT_FALLBACK; 594 return VM_FAULT_FALLBACK;
594 } 595 }
595 rcu_read_unlock(); 596 srcu_read_unlock(&dax_srcu, id);
596 597
597 return rc; 598 return rc;
598} 599}
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
713 * Note, rcu is not protecting the liveness of dax_dev, rcu is 714 * Note, rcu is not protecting the liveness of dax_dev, rcu is
714 * ensuring that any fault handlers that might have seen 715 * ensuring that any fault handlers that might have seen
715 * dax_dev->alive == true, have completed. Any fault handlers 716 * dax_dev->alive == true, have completed. Any fault handlers
716 * that start after synchronize_rcu() has started will abort 717 * that start after synchronize_srcu() has started will abort
717 * upon seeing dax_dev->alive == false. 718 * upon seeing dax_dev->alive == false.
718 */ 719 */
719 dax_dev->alive = false; 720 dax_dev->alive = false;
720 synchronize_rcu(); 721 synchronize_srcu(&dax_srcu);
721 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); 722 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
722 cdev_del(cdev); 723 cdev_del(cdev);
723 device_unregister(dev); 724 device_unregister(dev);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc596cf24..6204cc32d09c 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
251 */ 251 */
252 252
253 /* have we filled in period_length yet? */ 253 /* have we filled in period_length yet? */
254 if (*total_len + control_block->length < period_len) 254 if (*total_len + control_block->length < period_len) {
255 /* update number of bytes in this period so far */
256 *total_len += control_block->length;
255 return; 257 return;
258 }
256 259
257 /* calculate the length that remains to reach period_length */ 260 /* calculate the length that remains to reach period_length */
258 control_block->length = period_len - *total_len; 261 control_block->length = period_len - *total_len;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24e0221fd66d..d9118ec23025 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1108 switch (order) { 1108 switch (order) {
1109 case 0 ... 1: 1109 case 0 ... 1:
1110 return &unmap_pool[0]; 1110 return &unmap_pool[0];
1111#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1111 case 2 ... 4: 1112 case 2 ... 4:
1112 return &unmap_pool[1]; 1113 return &unmap_pool[1];
1113 case 5 ... 7: 1114 case 5 ... 7:
1114 return &unmap_pool[2]; 1115 return &unmap_pool[2];
1115 case 8: 1116 case 8:
1116 return &unmap_pool[3]; 1117 return &unmap_pool[3];
1118#endif
1117 default: 1119 default:
1118 BUG(); 1120 BUG();
1119 return NULL; 1121 return NULL;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce81f8..4773f2867234 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS
43 43
44config EDAC_DEBUG 44config EDAC_DEBUG
45 bool "Debugging" 45 bool "Debugging"
46 select DEBUG_FS
46 help 47 help
47 This turns on debugging information for the entire EDAC subsystem. 48 This turns on debugging information for the entire EDAC subsystem.
48 You do so by inserting edac_module with "edac_debug_level=x." Valid 49 You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@ config EDAC_SKX
259 Support for error detection and correction the Intel 260 Support for error detection and correction the Intel
260 Skylake server Integrated Memory Controllers. 261 Skylake server Integrated Memory Controllers.
261 262
263config EDAC_PND2
264 tristate "Intel Pondicherry2"
265 depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
266 help
267 Support for error detection and correction on the Intel
268 Pondicherry2 Integrated Memory Controller. This SoC IP is
269 first used on the Apollo Lake platform and Denverton
270 micro-server but may appear on others in the future.
271
262config EDAC_MPC85XX 272config EDAC_MPC85XX
263 tristate "Freescale MPC83xx / MPC85xx" 273 tristate "Freescale MPC83xx / MPC85xx"
264 depends on EDAC_MM_EDAC && FSL_SOC 274 depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e8b9a9..587107e90996 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o 33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
34obj-$(CONFIG_EDAC_SKX) += skx_edac.o 34obj-$(CONFIG_EDAC_SKX) += skx_edac.o
35obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
35obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 36obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
36obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 37obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
37obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o 38obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 1670d27bcac8..f683919981b0 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1293 dimm->mtype = MEM_FB_DDR2; 1293 dimm->mtype = MEM_FB_DDR2;
1294 1294
1295 /* ask what device type on this row */ 1295 /* ask what device type on this row */
1296 if (MTR_DRAM_WIDTH(mtr)) 1296 if (MTR_DRAM_WIDTH(mtr) == 8)
1297 dimm->dtype = DEV_X8; 1297 dimm->dtype = DEV_X8;
1298 else 1298 else
1299 dimm->dtype = DEV_X4; 1299 dimm->dtype = DEV_X4;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index abf6ef22e220..37a9ba71da44 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1207 1207
1208 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1209 dimm->grain = 8; 1209 dimm->grain = 8;
1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
1211 DEV_X8 : DEV_X4;
1211 dimm->mtype = MEM_FB_DDR2; 1212 dimm->mtype = MEM_FB_DDR2;
1212 /* 1213 /*
1213 * The eccc mechanism is SDDC (aka SECC), with 1214 * The eccc mechanism is SDDC (aka SECC), with
1214 * is similar to Chipkill. 1215 * is similar to Chipkill.
1215 */ 1216 */
1216 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1217 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
1217 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1218 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1218 ndimms++; 1219 ndimms++;
1219 } 1220 }
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644
index 000000000000..928e0dba41fc
--- /dev/null
+++ b/drivers/edac/pnd2_edac.c
@@ -0,0 +1,1546 @@
1/*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 21 * in "sys2pmi()". This is (almost) completely common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_mc.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
52enum type {
53 APL,
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
55};
56
57struct dram_addr {
58 int chan;
59 int dimm;
60 int rank;
61 int bank;
62 int row;
63 int col;
64};
65
66struct pnd2_pvt {
67 int dimm_geom[APL_NUM_CHANNELS];
68 u64 tolm, tohm;
69};
70
71/*
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
78 * all four channels.
79 */
80static struct region {
81 u64 base;
82 u64 limit;
83 u8 enabled;
84} mot, as0, as1, as2;
85
86static struct dunit_ops {
87 char *name;
88 enum type type;
89 int pmiaddr_shift;
90 int pmiidx_shift;
91 int channels;
92 int dimms_per_channel;
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
94 int (*get_registers)(void);
95 int (*check_ecc)(void);
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
100} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106/* Debug macros */
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132#ifdef CONFIG_X86_INTEL_SBI_APL
133#include "linux/platform_data/sbi_apl.h"
134int sbi_send(int port, int off, int op, u32 *data)
135{
136 struct sbi_apl_message sbi_arg;
137 int ret, read = 0;
138
139 memset(&sbi_arg, 0, sizeof(sbi_arg));
140
141 if (op == 0 || op == 4 || op == 6)
142 read = 1;
143 else
144 sbi_arg.data = *data;
145
146 sbi_arg.opcode = op;
147 sbi_arg.port_address = port;
148 sbi_arg.register_offset = off;
149 ret = sbi_apl_commit(&sbi_arg);
150 if (ret || sbi_arg.status)
151 edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
152 sbi_arg.status, ret, sbi_arg.data);
153
154 if (ret == 0)
155 ret = sbi_arg.status;
156
157 if (ret == 0 && read)
158 *data = sbi_arg.data;
159
160 return ret;
161}
162#else
163int sbi_send(int port, int off, int op, u32 *data)
164{
165 return -EUNATCH;
166}
167#endif
168
169static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
170{
171 int ret = 0;
172
173 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
174 switch (sz) {
175 case 8:
176 ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
177 case 4:
178 ret = sbi_send(port, off, op, (u32 *)data);
179 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
180 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
181 break;
182 }
183
184 return ret;
185}
186
187static u64 get_mem_ctrl_hub_base_addr(void)
188{
189 struct b_cr_mchbar_lo_pci lo;
190 struct b_cr_mchbar_hi_pci hi;
191 struct pci_dev *pdev;
192
193 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
194 if (pdev) {
195 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
196 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
197 pci_dev_put(pdev);
198 } else {
199 return 0;
200 }
201
202 if (!lo.enable) {
203 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
204 return 0;
205 }
206
207 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
208}
209
210static u64 get_sideband_reg_base_addr(void)
211{
212 struct pci_dev *pdev;
213 u32 hi, lo;
214
215 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
216 if (pdev) {
217 pci_read_config_dword(pdev, 0x10, &lo);
218 pci_read_config_dword(pdev, 0x14, &hi);
219 pci_dev_put(pdev);
220 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
221 } else {
222 return 0xfd000000;
223 }
224}
225
/*
 * Read a Denverton dunit register.
 *
 * Three access paths, selected by @op and @port:
 *  - op == 4:               PCI config-space dword read from device 8086:1980
 *  - op == 0, port == 0x4c: MMIO relative to the memory controller hub base
 *  - otherwise:             MMIO relative to the sideband base, with @port
 *                           selecting a 64KiB window (port << 16)
 *
 * @data receives @sz bytes (4 or 8); @name is used for debug output only.
 * Returns 0 on success or -ENODEV if the device/base address is unavailable.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		/* NOTE(review): maps a fixed 64KiB window for both the hub
		 * and sideband cases — confirm that is valid for the hub base. */
		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		/* For 8-byte registers read the high dword, then the low one */
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
269
/*
 * Register-read helpers: expand the <regname>_offset / <regname>_r_opcode
 * constants and dispatch through the platform-specific ops->rd_reg()
 * callback. RD_REGP() takes an explicit port; RD_REG() uses the
 * <regname>_port constant baked into the register definition.
 */
#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

/* Decode parameters captured once at init and used by the translation code */
static u64 top_lm, top_hm;	/* interleave boundaries (see top_lm/top_hm users) */
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;	/* channels usable for symmetric interleave */
static u8 asym_chan_mask;	/* channels carrying asymmetric regions */
static u8 chan_mask;

static int slice_selector = -1;	/* -1 == SELECTOR_DISABLED */
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
296
297static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
298{
299 rp->enabled = 1;
300 rp->base = base;
301 rp->limit = limit;
302 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
303}
304
305static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
306{
307 if (mask == 0) {
308 pr_info(FW_BUG "MOT mask cannot be zero\n");
309 return;
310 }
311 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
312 pr_info(FW_BUG "MOT mask not power of two\n");
313 return;
314 }
315 if (base & ~mask) {
316 pr_info(FW_BUG "MOT region base/mask alignment error\n");
317 return;
318 }
319 rp->base = base;
320 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
321 rp->enabled = 1;
322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
323}
324
325static bool in_region(struct region *rp, u64 addr)
326{
327 if (!rp->enabled)
328 return false;
329
330 return rp->base <= addr && addr <= rp->limit;
331}
332
333static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
334{
335 int mask = 0;
336
337 if (!p->slice_0_mem_disabled)
338 mask |= p->sym_slice0_channel_enabled;
339
340 if (!p->slice_1_disabled)
341 mask |= p->sym_slice1_channel_enabled << 2;
342
343 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
344 mask &= 0x5;
345
346 return mask;
347}
348
349static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
350 struct b_cr_asym_mem_region0_mchbar *as0,
351 struct b_cr_asym_mem_region1_mchbar *as1,
352 struct b_cr_asym_2way_mem_region_mchbar *as2way)
353{
354 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
355 int mask = 0;
356
357 if (as2way->asym_2way_interleave_enable)
358 mask = intlv[as2way->asym_2way_intlv_mode];
359 if (as0->slice0_asym_enable)
360 mask |= (1 << as0->slice0_asym_channel_select);
361 if (as1->slice1_asym_enable)
362 mask |= (4 << as1->slice1_asym_channel_select);
363 if (p->slice_0_mem_disabled)
364 mask &= 0xc;
365 if (p->slice_1_disabled)
366 mask &= 0x3;
367 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
368 mask &= 0x5;
369
370 return mask;
371}
372
/* Cached copies of the configuration registers, read once at init */
static struct b_cr_tolud_pci tolud;		/* top of low usable DRAM */
static struct b_cr_touud_lo_pci touud_lo;	/* top of upper usable DRAM, low 32 bits */
static struct b_cr_touud_hi_pci touud_hi;	/* top of upper usable DRAM, high bits */
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. Other port number matches documentation, but caution
 * advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;			/* scheduler config (e.g. ddr4en, chan_width) */
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];	/* rank population per channel */
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];	/* dmap..dmap5: address bit mapping */
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
403
404static void apl_mk_region(char *name, struct region *rp, void *asym)
405{
406 struct b_cr_asym_mem_region0_mchbar *a = asym;
407
408 mk_region(name, rp,
409 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
410 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
411 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
412}
413
414static void dnv_mk_region(char *name, struct region *rp, void *asym)
415{
416 struct b_cr_asym_mem_region_denverton *a = asym;
417
418 mk_region(name, rp,
419 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
420 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
421 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
422}
423
424static int apl_get_registers(void)
425{
426 int i;
427
428 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
429 return -ENODEV;
430
431 for (i = 0; i < APL_NUM_CHANNELS; i++)
432 if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
433 return -ENODEV;
434
435 return 0;
436}
437
438static int dnv_get_registers(void)
439{
440 int i;
441
442 if (RD_REG(&dsch, d_cr_dsch))
443 return -ENODEV;
444
445 for (i = 0; i < DNV_NUM_CHANNELS; i++)
446 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
447 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
448 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
449 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
450 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
451 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
452 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
453 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
454 return -ENODEV;
455
456 return 0;
457}
458
459/*
460 * Read all the h/w config registers once here (they don't
461 * change at run time. Figure out which address ranges have
462 * which interleave characteristics.
463 */
464static int get_registers(void)
465{
466 const int intlv[] = { 10, 11, 12, 12 };
467
468 if (RD_REG(&tolud, b_cr_tolud_pci) ||
469 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
470 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
471 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
472 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
473 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
474 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
475 RD_REG(&chash, b_cr_slice_channel_hash))
476 return -ENODEV;
477
478 if (ops->get_registers())
479 return -ENODEV;
480
481 if (ops->type == DNV) {
482 /* PMI channel idx (always 0) for asymmetric region */
483 asym0.slice0_asym_channel_select = 0;
484 asym1.slice1_asym_channel_select = 0;
485 /* PMI channel bitmap (always 1) for symmetric region */
486 chash.sym_slice0_channel_enabled = 0x1;
487 chash.sym_slice1_channel_enabled = 0x1;
488 }
489
490 if (asym0.slice0_asym_enable)
491 ops->mk_region("as0", &as0, &asym0);
492
493 if (asym1.slice1_asym_enable)
494 ops->mk_region("as1", &as1, &asym1);
495
496 if (asym_2way.asym_2way_interleave_enable) {
497 mk_region("as2way", &as2,
498 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
499 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
500 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
501 }
502
503 if (mot_base.imr_en) {
504 mk_region_mask("mot", &mot,
505 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
506 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
507 }
508
509 top_lm = U64_LSHIFT(tolud.tolud, 20);
510 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
511
512 two_slices = !chash.slice_1_disabled &&
513 !chash.slice_0_mem_disabled &&
514 (chash.sym_slice0_channel_enabled != 0) &&
515 (chash.sym_slice1_channel_enabled != 0);
516 two_channels = !chash.ch_1_disabled &&
517 !chash.enable_pmi_dual_data_mode &&
518 ((chash.sym_slice0_channel_enabled == 3) ||
519 (chash.sym_slice1_channel_enabled == 3));
520
521 sym_chan_mask = gen_sym_mask(&chash);
522 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
523 chan_mask = sym_chan_mask | asym_chan_mask;
524
525 if (two_slices && !two_channels) {
526 if (chash.hvm_mode)
527 slice_selector = 29;
528 else
529 slice_selector = intlv[chash.interleave_mode];
530 } else if (!two_slices && two_channels) {
531 if (chash.hvm_mode)
532 chan_selector = 29;
533 else
534 chan_selector = intlv[chash.interleave_mode];
535 } else if (two_slices && two_channels) {
536 if (chash.hvm_mode) {
537 slice_selector = 29;
538 chan_selector = 30;
539 } else {
540 slice_selector = intlv[chash.interleave_mode];
541 chan_selector = intlv[chash.interleave_mode] + 1;
542 }
543 }
544
545 if (two_slices) {
546 if (!chash.hvm_mode)
547 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
548 if (!two_channels)
549 slice_hash_mask |= BIT_ULL(slice_selector);
550 }
551
552 if (two_channels) {
553 if (!chash.hvm_mode)
554 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
555 if (!two_slices)
556 chan_hash_mask |= BIT_ULL(chan_selector);
557 }
558
559 return 0;
560}
561
562/* Get a contiguous memory address (remove the MMIO gap) */
563static u64 remove_mmio_gap(u64 sys)
564{
565 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
566}
567
568/* Squeeze out one address bit, shift upper part down to fill gap */
569static void remove_addr_bit(u64 *addr, int bitidx)
570{
571 u64 mask;
572
573 if (bitidx == -1)
574 return;
575
576 mask = (1ull << bitidx) - 1;
577 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
578}
579
580/* XOR all the bits from addr specified in mask */
581static int hash_by_mask(u64 addr, u64 mask)
582{
583 u64 result = addr & mask;
584
585 result = (result >> 32) ^ result;
586 result = (result >> 16) ^ result;
587 result = (result >> 8) ^ result;
588 result = (result >> 4) ^ result;
589 result = (result >> 2) ^ result;
590 result = (result >> 1) ^ result;
591
592 return (int)result & 1;
593}
594
595/*
596 * First stage decode. Take the system address and figure out which
597 * second stage will deal with it based on interleave modes.
598 */
599static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
600{
601 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
602 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
603 MOT_CHAN_INTLV_BIT_1SLC_2CH;
604 int slice_intlv_bit_rm = SELECTOR_DISABLED;
605 int chan_intlv_bit_rm = SELECTOR_DISABLED;
606 /* Determine if address is in the MOT region. */
607 bool mot_hit = in_region(&mot, addr);
608 /* Calculate the number of symmetric regions enabled. */
609 int sym_channels = hweight8(sym_chan_mask);
610
611 /*
612 * The amount we need to shift the asym base can be determined by the
613 * number of enabled symmetric channels.
614 * NOTE: This can only work because symmetric memory is not supposed
615 * to do a 3-way interleave.
616 */
617 int sym_chan_shift = sym_channels >> 1;
618
619 /* Give up if address is out of range, or in MMIO gap */
620 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
621 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
622 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
623 return -EINVAL;
624 }
625
626 /* Get a contiguous memory address (remove the MMIO gap) */
627 contig_addr = remove_mmio_gap(addr);
628
629 if (in_region(&as0, addr)) {
630 *pmiidx = asym0.slice0_asym_channel_select;
631
632 contig_base = remove_mmio_gap(as0.base);
633 contig_offset = contig_addr - contig_base;
634 contig_base_adj = (contig_base >> sym_chan_shift) *
635 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
636 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
637 } else if (in_region(&as1, addr)) {
638 *pmiidx = 2u + asym1.slice1_asym_channel_select;
639
640 contig_base = remove_mmio_gap(as1.base);
641 contig_offset = contig_addr - contig_base;
642 contig_base_adj = (contig_base >> sym_chan_shift) *
643 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
644 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
645 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
646 bool channel1;
647
648 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
649 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
650 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
651 hash_by_mask(contig_addr, chan_hash_mask);
652 *pmiidx |= (u32)channel1;
653
654 contig_base = remove_mmio_gap(as2.base);
655 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
656 contig_offset = contig_addr - contig_base;
657 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
658 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
659 } else {
660 /* Otherwise we're in normal, boring symmetric mode. */
661 *pmiidx = 0u;
662
663 if (two_slices) {
664 bool slice1;
665
666 if (mot_hit) {
667 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
668 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
669 } else {
670 slice_intlv_bit_rm = slice_selector;
671 slice1 = hash_by_mask(addr, slice_hash_mask);
672 }
673
674 *pmiidx = (u32)slice1 << 1;
675 }
676
677 if (two_channels) {
678 bool channel1;
679
680 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
681 MOT_CHAN_INTLV_BIT_1SLC_2CH;
682
683 if (mot_hit) {
684 chan_intlv_bit_rm = mot_intlv_bit;
685 channel1 = (addr >> mot_intlv_bit) & 1;
686 } else {
687 chan_intlv_bit_rm = chan_selector;
688 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
689 }
690
691 *pmiidx |= (u32)channel1;
692 }
693 }
694
695 /* Remove the chan_selector bit first */
696 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
697 /* Remove the slice bit (we remove it second because it must be lower */
698 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
699 *pmiaddr = contig_addr;
700
701 return 0;
702}
703
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS (0x80)			/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb	0
#define DEN_8Gb	2

/* dwid values */
#define X8	0
#define X16	1

/*
 * Per-geometry decode tables. bits[i] names the DRAM field that PMI
 * address bit i maps to, using the C()/B()/R()/RS encodings above; a
 * zero entry terminates the map. Geometries are matched against the
 * channel's addrdec/dden/dwid settings in apl_get_dimm_config().
 */
static struct dimm_geometry {
	u8 addrdec;	/* AMAP_* page-mapping mode */
	u8 dden;	/* DEN_* device density */
	u8 dwid;	/* X8/X16 device width */
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
852
853static int bank_hash(u64 pmiaddr, int idx, int shft)
854{
855 int bhash = 0;
856
857 switch (idx) {
858 case 0:
859 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
860 break;
861 case 1:
862 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
863 bhash ^= ((pmiaddr >> 22) & 1) << 1;
864 break;
865 case 2:
866 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
867 break;
868 }
869
870 return bhash;
871}
872
873static int rank_hash(u64 pmiaddr)
874{
875 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
876}
877
/*
 * Second stage decode. Compute rank, bank, row & column by scattering
 * the PMI address bits according to the channel's dimm_geometry table.
 * Fills *daddr on success; writes an explanation to msg and returns
 * -EINVAL on failure.
 */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];	/* table index saved by apl_get_dimm_config() */
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 * NOTE(review): the re-read below does not re-check
		 * i + skiprs against PMI_ADDRESS_WIDTH; fine with the
		 * current tables (RS is never the last entry) but worth
		 * confirming if new geometries are added.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Zero entry ends the map; any set bit beyond it is bogus */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
945
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

/*
 * Second stage decode for Denverton: assemble rank, dimm, bank, row and
 * column from the per-channel dmap* bit-mapping registers. The register
 * fields are used with fixed offsets (+6 for bank/row/column bits, +13
 * for rank/ca11) and sentinel values (31 for unused row bits, 0x3f for
 * an unused ca11) as encoded below.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	/* DDR4 has a second bank-group bit */
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	/* Optional bank XOR hashing with row/column bits */
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	/* Row bits 14..17 exist only if the register field isn't 31 */
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	/* DDR3-only column bit 11 (0x3f marks it unused) */
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1024
1025static int check_channel(int ch)
1026{
1027 if (drp0[ch].dramtype != 0) {
1028 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1029 return 1;
1030 } else if (drp0[ch].eccen == 0) {
1031 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1032 return 1;
1033 }
1034 return 0;
1035}
1036
1037static int apl_check_ecc_active(void)
1038{
1039 int i, ret = 0;
1040
1041 /* Check dramtype and ECC mode for each present DIMM */
1042 for (i = 0; i < APL_NUM_CHANNELS; i++)
1043 if (chan_mask & BIT(i))
1044 ret += check_channel(i);
1045 return ret ? -EINVAL : 0;
1046}
1047
1048#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1049
1050static int check_unit(int ch)
1051{
1052 struct d_cr_drp *d = &drp[ch];
1053
1054 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1055 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1056 return 1;
1057 }
1058 return 0;
1059}
1060
1061static int dnv_check_ecc_active(void)
1062{
1063 int i, ret = 0;
1064
1065 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1066 ret += check_unit(i);
1067 return ret ? -EINVAL : 0;
1068}
1069
1070static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1071 struct dram_addr *daddr, char *msg)
1072{
1073 u64 pmiaddr;
1074 u32 pmiidx;
1075 int ret;
1076
1077 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1078 if (ret)
1079 return ret;
1080
1081 pmiaddr >>= ops->pmiaddr_shift;
1082 /* pmi channel idx to dimm channel idx */
1083 pmiidx >>= ops->pmiidx_shift;
1084 daddr->chan = pmiidx;
1085
1086 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1087 if (ret)
1088 return ret;
1089
1090 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1091 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1092
1093 return 0;
1094}
1095
/*
 * Decode one memory machine-check record and report it through the
 * EDAC core. @daddr is caller-provided scratch for the decoded DRAM
 * address. Errors without a valid address are silently ignored.
 */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);	/* model-specific error code */
	u32 errcode = GET_BITFIELD(m->status, 0, 15);	/* MCA error code */
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Uncorrected with no valid return IP => fatal */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with an valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	/* msg doubles as the failure-reason buffer for the decoder */
	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		"%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	/* Decode failed: report with the reason from the decoder in msg */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
1174
1175static void apl_get_dimm_config(struct mem_ctl_info *mci)
1176{
1177 struct pnd2_pvt *pvt = mci->pvt_info;
1178 struct dimm_info *dimm;
1179 struct d_cr_drp0 *d;
1180 u64 capacity;
1181 int i, g;
1182
1183 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1184 if (!(chan_mask & BIT(i)))
1185 continue;
1186
1187 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1188 if (!dimm) {
1189 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1190 continue;
1191 }
1192
1193 d = &drp0[i];
1194 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1195 if (dimms[g].addrdec == d->addrdec &&
1196 dimms[g].dden == d->dden &&
1197 dimms[g].dwid == d->dwid)
1198 break;
1199
1200 if (g == ARRAY_SIZE(dimms)) {
1201 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1202 continue;
1203 }
1204
1205 pvt->dimm_geom[i] = g;
1206 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1207 (1ul << dimms[g].colbits);
1208 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1209 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1210 dimm->grain = 32;
1211 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1212 dimm->mtype = MEM_DDR3;
1213 dimm->edac_mode = EDAC_SECDED;
1214 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1215 }
1216}
1217
1218static const int dnv_dtypes[] = {
1219 DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1220};
1221
1222static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1223{
1224 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1225 struct dimm_info *dimm;
1226 struct d_cr_drp *d;
1227 u64 capacity;
1228
1229 if (dsch.ddr4en) {
1230 memtype = MEM_DDR4;
1231 banks = 16;
1232 colbits = 10;
1233 } else {
1234 memtype = MEM_DDR3;
1235 banks = 8;
1236 }
1237
1238 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1239 if (dmap4[i].row14 == 31)
1240 rowbits = 14;
1241 else if (dmap4[i].row15 == 31)
1242 rowbits = 15;
1243 else if (dmap4[i].row16 == 31)
1244 rowbits = 16;
1245 else if (dmap4[i].row17 == 31)
1246 rowbits = 17;
1247 else
1248 rowbits = 18;
1249
1250 if (memtype == MEM_DDR3) {
1251 if (dmap1[i].ca11 != 0x3f)
1252 colbits = 12;
1253 else
1254 colbits = 10;
1255 }
1256
1257 d = &drp[i];
1258 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1259 ranks_of_dimm[0] = d->rken0 + d->rken1;
1260 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1261 ranks_of_dimm[1] = d->rken2 + d->rken3;
1262
1263 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1264 if (!ranks_of_dimm[j])
1265 continue;
1266
1267 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1268 if (!dimm) {
1269 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1270 continue;
1271 }
1272
1273 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1274 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1275 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1276 dimm->grain = 32;
1277 dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1278 dimm->mtype = memtype;
1279 dimm->edac_mode = EDAC_SECDED;
1280 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1281 }
1282 }
1283}
1284
1285static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1286{
1287 struct edac_mc_layer layers[2];
1288 struct mem_ctl_info *mci;
1289 struct pnd2_pvt *pvt;
1290 int rc;
1291
1292 rc = ops->check_ecc();
1293 if (rc < 0)
1294 return rc;
1295
1296 /* Allocate a new MC control structure */
1297 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[0].size = ops->channels;
1299 layers[0].is_virt_csrow = false;
1300 layers[1].type = EDAC_MC_LAYER_SLOT;
1301 layers[1].size = ops->dimms_per_channel;
1302 layers[1].is_virt_csrow = true;
1303 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1304 if (!mci)
1305 return -ENOMEM;
1306
1307 pvt = mci->pvt_info;
1308 memset(pvt, 0, sizeof(*pvt));
1309
1310 mci->mod_name = "pnd2_edac.c";
1311 mci->dev_name = ops->name;
1312 mci->ctl_name = "Pondicherry2";
1313
1314 /* Get dimm basic config and the memory layout */
1315 ops->get_dimm_config(mci);
1316
1317 if (edac_mc_add_mc(mci)) {
1318 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1319 edac_mc_free(mci);
1320 return -EINVAL;
1321 }
1322
1323 *ppmci = mci;
1324
1325 return 0;
1326}
1327
/*
 * Tear down the controller registered by pnd2_register_mci(): remove its
 * sysfs nodes from the EDAC core and free the mem_ctl_info.  A NULL or
 * half-initialized mci is reported and ignored.
 */
1328static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1329{
1330 if (unlikely(!mci || !mci->pvt_info)) {
1331 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1332 return;
1333 }
1334
1335 /* Remove MC sysfs nodes */
	/*
	 * NOTE(review): edac_mc_del_mc() is passed NULL rather than a
	 * struct device pointer -- presumably this mci was registered
	 * without a parent device so the core resolves it that way.
	 * Confirm against edac_mc_add_mc()/edac_mc_del_mc() semantics.
	 */
1336 edac_mc_del_mc(NULL);
1337 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1338 edac_mc_free(mci);
1339}
1340
1341/*
1342 * Callback function registered with core kernel mce code.
1343 * Called once for each logged error.
1344 */
1345static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1346{
1347 struct mce *mce = (struct mce *)data;
1348 struct mem_ctl_info *mci;
1349 struct dram_addr daddr;
1350 char *type;
1351
1352 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1353 return NOTIFY_DONE;
1354
1355 mci = pnd2_mci;
1356 if (!mci)
1357 return NOTIFY_DONE;
1358
1359 /*
1360 * Just let mcelog handle it if the error is
1361 * outside the memory controller. A memory error
1362 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
1363 * bit 12 has an special meaning.
1364 */
1365 if ((mce->status & 0xefff) >> 7 != 1)
1366 return NOTIFY_DONE;
1367
1368 if (mce->mcgstatus & MCG_STATUS_MCIP)
1369 type = "Exception";
1370 else
1371 type = "Event";
1372
1373 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1374 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1375 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1376 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1377 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1378 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1379 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1380 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1381
1382 pnd2_mce_output_error(mci, mce, &daddr);
1383
1384 /* Advice mcelog that the error were handled */
1385 return NOTIFY_STOP;
1386}
1387
/*
 * Hooked into the x86 MCE decode chain at init time so that every logged
 * machine check is offered to pnd2_mce_check_error().
 */
1388static struct notifier_block pnd2_mce_dec = {
1389 .notifier_call = pnd2_mce_check_error,
1390};
1391
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.  The decoded result can then be read back
 * from the companion read-only blob file.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

/*
 * debugfs write handler: build a synthetic MCE record for @val and run it
 * through the decoder, leaving a printable result in pnd2_blob.
 */
static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	/*
	 * Zero the whole record: only a few fields are set below and the
	 * decoder must not see stale stack garbage in the rest.
	 */
	struct mce m = {0};

	*(u64 *)data = val;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

/* Create the pnd2_test debugfs dir with the write-address/read-result pair. */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

/* Remove the debugfs directory and everything inside it. */
static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
1442
1443
1444static int pnd2_probe(void)
1445{
1446 int rc;
1447
1448 edac_dbg(2, "\n");
1449 rc = get_registers();
1450 if (rc)
1451 return rc;
1452
1453 return pnd2_register_mci(&pnd2_mci);
1454}
1455
1456static void pnd2_remove(void)
1457{
1458 edac_dbg(0, "\n");
1459 pnd2_unregister_mci(pnd2_mci);
1460}
1461
/*
 * Apollo Lake platform hooks: one DIMM per channel; register access,
 * ECC detection and PMI address decode go through the apl_* helpers.
 */
1462static struct dunit_ops apl_ops = {
1463 .name = "pnd2/apl",
1464 .type = APL,
1465 .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
1466 .pmiidx_shift = 0,
1467 .channels = APL_NUM_CHANNELS,
1468 .dimms_per_channel = 1,
1469 .rd_reg = apl_rd_reg,
1470 .get_registers = apl_get_registers,
1471 .check_ecc = apl_check_ecc_active,
1472 .mk_region = apl_mk_region,
1473 .get_dimm_config = apl_get_dimm_config,
1474 .pmi2mem = apl_pmi2mem,
1475};
1476
/*
 * Denverton platform hooks: two DIMMs per channel; register access,
 * ECC detection and PMI address decode go through the dnv_* helpers.
 */
1477static struct dunit_ops dnv_ops = {
1478 .name = "pnd2/dnv",
1479 .type = DNV,
1480 .pmiaddr_shift = 0,
1481 .pmiidx_shift = 1,
1482 .channels = DNV_NUM_CHANNELS,
1483 .dimms_per_channel = 2,
1484 .rd_reg = dnv_rd_reg,
1485 .get_registers = dnv_get_registers,
1486 .check_ecc = dnv_check_ecc_active,
1487 .mk_region = dnv_mk_region,
1488 .get_dimm_config = dnv_get_dimm_config,
1489 .pmi2mem = dnv_pmi2mem,
1490};
1491
/*
 * CPU models this driver binds to; ->driver_data selects the matching
 * platform ops (Goldmont/Apollo Lake vs. Denverton).
 */
1492static const struct x86_cpu_id pnd2_cpuids[] = {
1493 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1494 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1495 { }
1496};
1497MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1498
1499static int __init pnd2_init(void)
1500{
1501 const struct x86_cpu_id *id;
1502 int rc;
1503
1504 edac_dbg(2, "\n");
1505
1506 id = x86_match_cpu(pnd2_cpuids);
1507 if (!id)
1508 return -ENODEV;
1509
1510 ops = (struct dunit_ops *)id->driver_data;
1511
1512 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1513 opstate_init();
1514
1515 rc = pnd2_probe();
1516 if (rc < 0) {
1517 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1518 return rc;
1519 }
1520
1521 if (!pnd2_mci)
1522 return -ENODEV;
1523
1524 mce_register_decode_chain(&pnd2_mce_dec);
1525 setup_pnd2_debug();
1526
1527 return 0;
1528}
1529
1530static void __exit pnd2_exit(void)
1531{
1532 edac_dbg(2, "\n");
1533 teardown_pnd2_debug();
1534 mce_unregister_decode_chain(&pnd2_mce_dec);
1535 pnd2_remove();
1536}
1537
1538module_init(pnd2_init);
1539module_exit(pnd2_exit);
1540
/* 0444: edac_op_state is visible read-only in sysfs, set only at load time. */
1541module_param(edac_op_state, int, 0444);
1542MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1543
1544MODULE_LICENSE("GPL v2");
1545MODULE_AUTHOR("Tony Luck");
1546MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644
index 000000000000..61b6e79492bb
--- /dev/null
+++ b/drivers/edac/pnd2_edac.h
@@ -0,0 +1,301 @@
1/*
2 * Register bitfield descriptions for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _PND2_REGS_H
17#define _PND2_REGS_H
18
19struct b_cr_touud_lo_pci {
20 u32 lock : 1;
21 u32 reserved_1 : 19;
22 u32 touud : 12;
23};
24
25#define b_cr_touud_lo_pci_port 0x4c
26#define b_cr_touud_lo_pci_offset 0xa8
27#define b_cr_touud_lo_pci_r_opcode 0x04
28
29struct b_cr_touud_hi_pci {
30 u32 touud : 7;
31 u32 reserved_0 : 25;
32};
33
34#define b_cr_touud_hi_pci_port 0x4c
35#define b_cr_touud_hi_pci_offset 0xac
36#define b_cr_touud_hi_pci_r_opcode 0x04
37
38struct b_cr_tolud_pci {
39 u32 lock : 1;
40 u32 reserved_0 : 19;
41 u32 tolud : 12;
42};
43
44#define b_cr_tolud_pci_port 0x4c
45#define b_cr_tolud_pci_offset 0xbc
46#define b_cr_tolud_pci_r_opcode 0x04
47
48struct b_cr_mchbar_lo_pci {
49 u32 enable : 1;
50 u32 pad_3_1 : 3;
51 u32 pad_14_4: 11;
52 u32 base: 17;
53};
54
55struct b_cr_mchbar_hi_pci {
56 u32 base : 7;
57 u32 pad_31_7 : 25;
58};
59
60/* Symmetric region */
/*
 * Slice/channel hash control register viewed as a 64-bit bitfield.
 * The slice and channel hash masks are each 14 bits wide.  Field names
 * mirror the hardware layout -- NOTE(review): confirm exact semantics
 * against the SoC register specification before relying on them.
 */
61struct b_cr_slice_channel_hash {
62 u64 slice_1_disabled : 1;
63 u64 hvm_mode : 1;
64 u64 interleave_mode : 2;
65 u64 slice_0_mem_disabled : 1;
66 u64 reserved_0 : 1;
67 u64 slice_hash_mask : 14;
68 u64 reserved_1 : 11;
69 u64 enable_pmi_dual_data_mode : 1;
70 u64 ch_1_disabled : 1;
71 u64 reserved_2 : 1;
72 u64 sym_slice0_channel_enabled : 2;
73 u64 sym_slice1_channel_enabled : 2;
74 u64 ch_hash_mask : 14;
75 u64 reserved_3 : 11;
76 u64 lock : 1;
77};
78
79#define b_cr_slice_channel_hash_port 0x4c
80#define b_cr_slice_channel_hash_offset 0x4c58
81#define b_cr_slice_channel_hash_r_opcode 0x06
82
83struct b_cr_mot_out_base_mchbar {
84 u32 reserved_0 : 14;
85 u32 mot_out_base : 15;
86 u32 reserved_1 : 1;
87 u32 tr_en : 1;
88 u32 imr_en : 1;
89};
90
91#define b_cr_mot_out_base_mchbar_port 0x4c
92#define b_cr_mot_out_base_mchbar_offset 0x6af0
93#define b_cr_mot_out_base_mchbar_r_opcode 0x00
94
95struct b_cr_mot_out_mask_mchbar {
96 u32 reserved_0 : 14;
97 u32 mot_out_mask : 15;
98 u32 reserved_1 : 1;
99 u32 ia_iwb_en : 1;
100 u32 gt_iwb_en : 1;
101};
102
103#define b_cr_mot_out_mask_mchbar_port 0x4c
104#define b_cr_mot_out_mask_mchbar_offset 0x6af4
105#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
106
107struct b_cr_asym_mem_region0_mchbar {
108 u32 pad : 4;
109 u32 slice0_asym_base : 11;
110 u32 pad_18_15 : 4;
111 u32 slice0_asym_limit : 11;
112 u32 slice0_asym_channel_select : 1;
113 u32 slice0_asym_enable : 1;
114};
115
116#define b_cr_asym_mem_region0_mchbar_port 0x4c
117#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
118#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
119
120struct b_cr_asym_mem_region1_mchbar {
121 u32 pad : 4;
122 u32 slice1_asym_base : 11;
123 u32 pad_18_15 : 4;
124 u32 slice1_asym_limit : 11;
125 u32 slice1_asym_channel_select : 1;
126 u32 slice1_asym_enable : 1;
127};
128
129#define b_cr_asym_mem_region1_mchbar_port 0x4c
130#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
131#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
132
133/* Some bit fields moved in above two structs on Denverton */
134struct b_cr_asym_mem_region_denverton {
135 u32 pad : 4;
136 u32 slice_asym_base : 8;
137 u32 pad_19_12 : 8;
138 u32 slice_asym_limit : 8;
139 u32 pad_28_30 : 3;
140 u32 slice_asym_enable : 1;
141};
142
143struct b_cr_asym_2way_mem_region_mchbar {
144 u32 pad : 2;
145 u32 asym_2way_intlv_mode : 2;
146 u32 asym_2way_base : 11;
147 u32 pad_16_15 : 2;
148 u32 asym_2way_limit : 11;
149 u32 pad_30_28 : 3;
150 u32 asym_2way_interleave_enable : 1;
151};
152
153#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
154#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
155#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
156
157/* Apollo Lake d-unit */
158
/*
 * Apollo Lake d-unit DRAM population register (bitfield view).
 * NOTE(review): field names suggest rken* are per-rank enables, dwid the
 * device width and dden the device density -- confirm against the
 * Apollo Lake d-unit register documentation.
 */
159struct d_cr_drp0 {
160 u32 rken0 : 1;
161 u32 rken1 : 1;
162 u32 ddmen : 1;
163 u32 rsvd3 : 1;
164 u32 dwid : 2;
165 u32 dden : 3;
166 u32 rsvd13_9 : 5;
167 u32 rsien : 1;
168 u32 bahen : 1;
169 u32 rsvd18_16 : 3;
170 u32 caswizzle : 2;
171 u32 eccen : 1;
172 u32 dramtype : 3;
173 u32 blmode : 3;
174 u32 addrdec : 2;
175 u32 dramdevice_pr : 2;
176};
177
178#define d_cr_drp0_offset 0x1400
179#define d_cr_drp0_r_opcode 0x00
180
181/* Denverton d-unit */
182
183struct d_cr_dsch {
184 u32 ch0en : 1;
185 u32 ch1en : 1;
186 u32 ddr4en : 1;
187 u32 coldwake : 1;
188 u32 newbypdis : 1;
189 u32 chan_width : 1;
190 u32 rsvd6_6 : 1;
191 u32 ooodis : 1;
192 u32 rsvd18_8 : 11;
193 u32 ic : 1;
194 u32 rsvd31_20 : 12;
195};
196
197#define d_cr_dsch_port 0x16
198#define d_cr_dsch_offset 0x0
199#define d_cr_dsch_r_opcode 0x0
200
201struct d_cr_ecc_ctrl {
202 u32 eccen : 1;
203 u32 rsvd31_1 : 31;
204};
205
206#define d_cr_ecc_ctrl_offset 0x180
207#define d_cr_ecc_ctrl_r_opcode 0x0
208
209struct d_cr_drp {
210 u32 rken0 : 1;
211 u32 rken1 : 1;
212 u32 rken2 : 1;
213 u32 rken3 : 1;
214 u32 dimmdwid0 : 2;
215 u32 dimmdden0 : 2;
216 u32 dimmdwid1 : 2;
217 u32 dimmdden1 : 2;
218 u32 rsvd15_12 : 4;
219 u32 dimmflip : 1;
220 u32 rsvd31_17 : 15;
221};
222
223#define d_cr_drp_offset 0x158
224#define d_cr_drp_r_opcode 0x0
225
226struct d_cr_dmap {
227 u32 ba0 : 5;
228 u32 ba1 : 5;
229 u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
230 u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
231 u32 rs0 : 5;
232 u32 rs1 : 5;
233 u32 rsvd : 2;
234};
235
236#define d_cr_dmap_offset 0x174
237#define d_cr_dmap_r_opcode 0x0
238
239struct d_cr_dmap1 {
240 u32 ca11 : 6;
241 u32 bxor : 1;
242 u32 rsvd : 25;
243};
244
245#define d_cr_dmap1_offset 0xb4
246#define d_cr_dmap1_r_opcode 0x0
247
248struct d_cr_dmap2 {
249 u32 row0 : 5;
250 u32 row1 : 5;
251 u32 row2 : 5;
252 u32 row3 : 5;
253 u32 row4 : 5;
254 u32 row5 : 5;
255 u32 rsvd : 2;
256};
257
258#define d_cr_dmap2_offset 0x148
259#define d_cr_dmap2_r_opcode 0x0
260
261struct d_cr_dmap3 {
262 u32 row6 : 5;
263 u32 row7 : 5;
264 u32 row8 : 5;
265 u32 row9 : 5;
266 u32 row10 : 5;
267 u32 row11 : 5;
268 u32 rsvd : 2;
269};
270
271#define d_cr_dmap3_offset 0x14c
272#define d_cr_dmap3_r_opcode 0x0
273
274struct d_cr_dmap4 {
275 u32 row12 : 5;
276 u32 row13 : 5;
277 u32 row14 : 5;
278 u32 row15 : 5;
279 u32 row16 : 5;
280 u32 row17 : 5;
281 u32 rsvd : 2;
282};
283
284#define d_cr_dmap4_offset 0x150
285#define d_cr_dmap4_r_opcode 0x0
286
287struct d_cr_dmap5 {
288 u32 ca3 : 4;
289 u32 ca4 : 4;
290 u32 ca5 : 4;
291 u32 ca6 : 4;
292 u32 ca7 : 4;
293 u32 ca8 : 4;
294 u32 ca9 : 4;
295 u32 rsvd : 4;
296};
297
298#define d_cr_dmap5_offset 0x154
299#define d_cr_dmap5_r_opcode 0x0
300
301#endif /* _PND2_REGS_H */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 6c270d9d304a..669246056812 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
1596 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); 1596 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
1597 if (!reg) 1597 if (!reg)
1598 goto chk_iob_axi0; 1598 goto chk_iob_axi0;
1599 dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); 1599 dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
1600 if (reg & IOBPA_RDATA_CORRUPT_MASK) 1600 if (reg & IOBPA_RDATA_CORRUPT_MASK)
1601 dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); 1601 dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
1602 if (reg & IOBPA_M_RDATA_CORRUPT_MASK) 1602 if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 96bbae579c0b..fc09c76248b4 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -44,7 +44,7 @@ config EXTCON_GPIO
44 44
45config EXTCON_INTEL_INT3496 45config EXTCON_INTEL_INT3496
46 tristate "Intel INT3496 ACPI device extcon driver" 46 tristate "Intel INT3496 ACPI device extcon driver"
47 depends on GPIOLIB && ACPI 47 depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
48 help 48 help
49 Say Y here to enable extcon support for USB OTG ports controlled by 49 Say Y here to enable extcon support for USB OTG ports controlled by
50 an Intel INT3496 ACPI device. 50 an Intel INT3496 ACPI device.
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index a3131b036de6..9d17984bbbd4 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
45 EXTCON_NONE, 45 EXTCON_NONE,
46}; 46};
47 47
48static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
49static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
50static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
51
52static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
53 { "id-gpios", &id_gpios, 1 },
54 { "vbus-gpios", &vbus_gpios, 1 },
55 { "mux-gpios", &mux_gpios, 1 },
56 { },
57};
58
48static void int3496_do_usb_id(struct work_struct *work) 59static void int3496_do_usb_id(struct work_struct *work)
49{ 60{
50 struct int3496_data *data = 61 struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
83 struct int3496_data *data; 94 struct int3496_data *data;
84 int ret; 95 int ret;
85 96
97 ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
98 acpi_int3496_default_gpios);
99 if (ret) {
100 dev_err(dev, "can't add GPIO ACPI mapping\n");
101 return ret;
102 }
103
86 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 104 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
87 if (!data) 105 if (!data)
88 return -ENOMEM; 106 return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
90 data->dev = dev; 108 data->dev = dev;
91 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); 109 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
92 110
93 data->gpio_usb_id = devm_gpiod_get_index(dev, "id", 111 data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
94 INT3496_GPIO_USB_ID,
95 GPIOD_IN);
96 if (IS_ERR(data->gpio_usb_id)) { 112 if (IS_ERR(data->gpio_usb_id)) {
97 ret = PTR_ERR(data->gpio_usb_id); 113 ret = PTR_ERR(data->gpio_usb_id);
98 dev_err(dev, "can't request USB ID GPIO: %d\n", ret); 114 dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
99 return ret; 115 return ret;
116 } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
117 dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
118 gpiod_direction_input(data->gpio_usb_id);
100 } 119 }
101 120
102 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); 121 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
103 if (data->usb_id_irq <= 0) { 122 if (data->usb_id_irq < 0) {
104 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); 123 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
105 return -EINVAL; 124 return data->usb_id_irq;
106 } 125 }
107 126
108 data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", 127 data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
109 INT3496_GPIO_VBUS_EN,
110 GPIOD_ASIS);
111 if (IS_ERR(data->gpio_vbus_en)) 128 if (IS_ERR(data->gpio_vbus_en))
112 dev_info(dev, "can't request VBUS EN GPIO\n"); 129 dev_info(dev, "can't request VBUS EN GPIO\n");
113 130
114 data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", 131 data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
115 INT3496_GPIO_USB_MUX,
116 GPIOD_ASIS);
117 if (IS_ERR(data->gpio_usb_mux)) 132 if (IS_ERR(data->gpio_usb_mux))
118 dev_info(dev, "can't request USB MUX GPIO\n"); 133 dev_info(dev, "can't request USB MUX GPIO\n");
119 134
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
154 devm_free_irq(&pdev->dev, data->usb_id_irq, data); 169 devm_free_irq(&pdev->dev, data->usb_id_irq, data);
155 cancel_delayed_work_sync(&data->work); 170 cancel_delayed_work_sync(&data->work);
156 171
172 acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
173
157 return 0; 174 return 0;
158} 175}
159 176
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e7d404059b73..b372aad3b449 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
389 return 0; 389 return 0;
390 } 390 }
391 } 391 }
392 pr_err_once("requested map not found.\n");
393 return -ENOENT; 392 return -ENOENT;
394} 393}
395 394
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 08b026864d4e..8554d7aec31c 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -254,7 +254,7 @@ void __init efi_esrt_init(void)
254 254
255 rc = efi_mem_desc_lookup(efi.esrt, &md); 255 rc = efi_mem_desc_lookup(efi.esrt, &md);
256 if (rc < 0) { 256 if (rc < 0) {
257 pr_err("ESRT header is not in the memory map.\n"); 257 pr_warn("ESRT header is not in the memory map.\n");
258 return; 258 return;
259 } 259 }
260 260
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e4cf23..24c461dea7af 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
149 149
150 status = __gop_query32(sys_table_arg, gop32, &info, &size, 150 status = __gop_query32(sys_table_arg, gop32, &info, &size,
151 &current_fb_base); 151 &current_fb_base);
152 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 152 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
153 info->pixel_format != PIXEL_BLT_ONLY) {
153 /* 154 /*
154 * Systems that use the UEFI Console Splitter may 155 * Systems that use the UEFI Console Splitter may
155 * provide multiple GOP devices, not all of which are 156 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
266 267
267 status = __gop_query64(sys_table_arg, gop64, &info, &size, 268 status = __gop_query64(sys_table_arg, gop64, &info, &size,
268 &current_fb_base); 269 &current_fb_base);
269 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 270 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
271 info->pixel_format != PIXEL_BLT_ONLY) {
270 /* 272 /*
271 * Systems that use the UEFI Console Splitter may 273 * Systems that use the UEFI Console Splitter may
272 * provide multiple GOP devices, not all of which are 274 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 9e1a138fed53..16a8951b2bed 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
96 gpio->regmap = a10sr->regmap; 96 gpio->regmap = a10sr->regmap;
97 97
98 gpio->gp = altr_a10sr_gc; 98 gpio->gp = altr_a10sr_gc;
99 99 gpio->gp.parent = pdev->dev.parent;
100 gpio->gp.of_node = pdev->dev.of_node; 100 gpio->gp.of_node = pdev->dev.of_node;
101 101
102 ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio); 102 ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 5bddbd507ca9..3fe6a21e05a5 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
90 90
91 altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); 91 altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
92 92
93 if (type == IRQ_TYPE_NONE) 93 if (type == IRQ_TYPE_NONE) {
94 irq_set_handler_locked(d, handle_bad_irq);
94 return 0; 95 return 0;
95 if (type == IRQ_TYPE_LEVEL_HIGH && 96 }
96 altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) 97 if (type == altera_gc->interrupt_trigger) {
97 return 0; 98 if (type == IRQ_TYPE_LEVEL_HIGH)
98 if (type == IRQ_TYPE_EDGE_RISING && 99 irq_set_handler_locked(d, handle_level_irq);
99 altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) 100 else
100 return 0; 101 irq_set_handler_locked(d, handle_simple_irq);
101 if (type == IRQ_TYPE_EDGE_FALLING &&
102 altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
103 return 0;
104 if (type == IRQ_TYPE_EDGE_BOTH &&
105 altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
106 return 0; 102 return 0;
107 103 }
104 irq_set_handler_locked(d, handle_bad_irq);
108 return -EINVAL; 105 return -EINVAL;
109} 106}
110 107
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
230 chained_irq_exit(chip, desc); 227 chained_irq_exit(chip, desc);
231} 228}
232 229
233
234static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) 230static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
235{ 231{
236 struct altera_gpio_chip *altera_gc; 232 struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
310 altera_gc->interrupt_trigger = reg; 306 altera_gc->interrupt_trigger = reg;
311 307
312 ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, 308 ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
313 handle_simple_irq, IRQ_TYPE_NONE); 309 handle_bad_irq, IRQ_TYPE_NONE);
314 310
315 if (ret) { 311 if (ret) {
316 dev_err(&pdev->dev, "could not add irqchip\n"); 312 dev_err(&pdev->dev, "could not add irqchip\n");
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index bdb692345428..2a57d024481d 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
270static irqreturn_t mcp23s08_irq(int irq, void *data) 270static irqreturn_t mcp23s08_irq(int irq, void *data)
271{ 271{
272 struct mcp23s08 *mcp = data; 272 struct mcp23s08 *mcp = data;
273 int intcap, intf, i; 273 int intcap, intf, i, gpio, gpio_orig, intcap_mask;
274 unsigned int child_irq; 274 unsigned int child_irq;
275 bool intf_set, intcap_changed, gpio_bit_changed,
276 defval_changed, gpio_set;
275 277
276 mutex_lock(&mcp->lock); 278 mutex_lock(&mcp->lock);
277 if (mcp_read(mcp, MCP_INTF, &intf) < 0) { 279 if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
287 } 289 }
288 290
289 mcp->cache[MCP_INTCAP] = intcap; 291 mcp->cache[MCP_INTCAP] = intcap;
292
293 /* This clears the interrupt(configurable on S18) */
294 if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
295 mutex_unlock(&mcp->lock);
296 return IRQ_HANDLED;
297 }
298 gpio_orig = mcp->cache[MCP_GPIO];
299 mcp->cache[MCP_GPIO] = gpio;
290 mutex_unlock(&mcp->lock); 300 mutex_unlock(&mcp->lock);
291 301
302 if (mcp->cache[MCP_INTF] == 0) {
303 /* There is no interrupt pending */
304 return IRQ_HANDLED;
305 }
306
307 dev_dbg(mcp->chip.parent,
308 "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
309 intcap, intf, gpio_orig, gpio);
292 310
293 for (i = 0; i < mcp->chip.ngpio; i++) { 311 for (i = 0; i < mcp->chip.ngpio; i++) {
294 if ((BIT(i) & mcp->cache[MCP_INTF]) && 312 /* We must check all of the inputs on the chip,
295 ((BIT(i) & intcap & mcp->irq_rise) || 313 * otherwise we may not notice a change on >=2 pins.
296 (mcp->irq_fall & ~intcap & BIT(i)) || 314 *
297 (BIT(i) & mcp->cache[MCP_INTCON]))) { 315 * On at least the mcp23s17, INTCAP is only updated
316 * one byte at a time(INTCAPA and INTCAPB are
317 * not written to at the same time - only on a per-bank
318 * basis).
319 *
320 * INTF only contains the single bit that caused the
321 * interrupt per-bank. On the mcp23s17, there is
322 * INTFA and INTFB. If two pins are changed on the A
323 * side at the same time, INTF will only have one bit
324 * set. If one pin on the A side and one pin on the B
325 * side are changed at the same time, INTF will have
326 * two bits set. Thus, INTF can't be the only check
327 * to see if the input has changed.
328 */
329
330 intf_set = BIT(i) & mcp->cache[MCP_INTF];
331 if (i < 8 && intf_set)
332 intcap_mask = 0x00FF;
333 else if (i >= 8 && intf_set)
334 intcap_mask = 0xFF00;
335 else
336 intcap_mask = 0x00;
337
338 intcap_changed = (intcap_mask &
339 (BIT(i) & mcp->cache[MCP_INTCAP])) !=
340 (intcap_mask & (BIT(i) & gpio_orig));
341 gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
342 gpio_bit_changed = (BIT(i) & gpio_orig) !=
343 (BIT(i) & mcp->cache[MCP_GPIO]);
344 defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
345 ((BIT(i) & mcp->cache[MCP_GPIO]) !=
346 (BIT(i) & mcp->cache[MCP_DEFVAL]));
347
348 if (((gpio_bit_changed || intcap_changed) &&
349 (BIT(i) & mcp->irq_rise) && gpio_set) ||
350 ((gpio_bit_changed || intcap_changed) &&
351 (BIT(i) & mcp->irq_fall) && !gpio_set) ||
352 defval_changed) {
298 child_irq = irq_find_mapping(mcp->chip.irqdomain, i); 353 child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
299 handle_nested_irq(child_irq); 354 handle_nested_irq(child_irq);
300 } 355 }
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 06dac72cb69c..d99338689213 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
197 struct seq_file *sfile; 197 struct seq_file *sfile;
198 struct gpio_desc *desc; 198 struct gpio_desc *desc;
199 struct gpio_chip *gc; 199 struct gpio_chip *gc;
200 int status, val; 200 int val;
201 char buf; 201 char buf;
202 202
203 sfile = file->private_data; 203 sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
206 chip = priv->chip; 206 chip = priv->chip;
207 gc = &chip->gc; 207 gc = &chip->gc;
208 208
209 status = copy_from_user(&buf, usr_buf, 1); 209 if (copy_from_user(&buf, usr_buf, 1))
210 if (status) 210 return -EFAULT;
211 return status;
212 211
213 if (buf == '0') 212 if (buf == '0')
214 val = 0; 213 val = 0;
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 40a8881c2ce8..f1c6ec17b90a 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -42,9 +42,7 @@ struct xgene_gpio {
42 struct gpio_chip chip; 42 struct gpio_chip chip;
43 void __iomem *base; 43 void __iomem *base;
44 spinlock_t lock; 44 spinlock_t lock;
45#ifdef CONFIG_PM
46 u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; 45 u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
47#endif
48}; 46};
49 47
50static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) 48static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
138 return 0; 136 return 0;
139} 137}
140 138
141#ifdef CONFIG_PM 139static __maybe_unused int xgene_gpio_suspend(struct device *dev)
142static int xgene_gpio_suspend(struct device *dev)
143{ 140{
144 struct xgene_gpio *gpio = dev_get_drvdata(dev); 141 struct xgene_gpio *gpio = dev_get_drvdata(dev);
145 unsigned long bank_offset; 142 unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
152 return 0; 149 return 0;
153} 150}
154 151
155static int xgene_gpio_resume(struct device *dev) 152static __maybe_unused int xgene_gpio_resume(struct device *dev)
156{ 153{
157 struct xgene_gpio *gpio = dev_get_drvdata(dev); 154 struct xgene_gpio *gpio = dev_get_drvdata(dev);
158 unsigned long bank_offset; 155 unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
166} 163}
167 164
168static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); 165static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
169#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm)
170#else
171#define XGENE_GPIO_PM_OPS NULL
172#endif
173 166
174static int xgene_gpio_probe(struct platform_device *pdev) 167static int xgene_gpio_probe(struct platform_device *pdev)
175{ 168{
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
241 .name = "xgene-gpio", 234 .name = "xgene-gpio",
242 .of_match_table = xgene_gpio_of_match, 235 .of_match_table = xgene_gpio_of_match,
243 .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), 236 .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
244 .pm = XGENE_GPIO_PM_OPS, 237 .pm = &xgene_gpio_pm,
245 }, 238 },
246 .probe = xgene_gpio_probe, 239 .probe = xgene_gpio_probe,
247}; 240};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9b37a3692b3f..2bd683e2be02 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
266 goto fail_free_event; 266 goto fail_free_event;
267 } 267 }
268 268
269 if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
270 enable_irq_wake(irq);
271
269 list_add_tail(&event->node, &acpi_gpio->events); 272 list_add_tail(&event->node, &acpi_gpio->events);
270 return AE_OK; 273 return AE_OK;
271 274
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
339 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 342 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
340 struct gpio_desc *desc; 343 struct gpio_desc *desc;
341 344
345 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
346 disable_irq_wake(event->irq);
347
342 free_irq(event->irq, event); 348 free_irq(event->irq, event);
343 desc = event->desc; 349 desc = event->desc;
344 if (WARN_ON(IS_ERR(desc))) 350 if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
571 } 577 }
572 578
573 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); 579 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
574 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) 580 if (!IS_ERR(desc))
575 break; 581 break;
582 if (PTR_ERR(desc) == -EPROBE_DEFER)
583 return ERR_CAST(desc);
576 } 584 }
577 585
578 /* Then from plain _CRS GPIOs */ 586 /* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a3a105ec99e2..de0cf3315484 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
475 int r; 475 int r;
476 476
477 if (adev->wb.wb_obj == NULL) { 477 if (adev->wb.wb_obj == NULL) {
478 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, 478 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
479 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 479 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
480 &adev->wb.wb_obj, &adev->wb.gpu_addr, 480 &adev->wb.wb_obj, &adev->wb.gpu_addr,
481 (void **)&adev->wb.wb); 481 (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
488 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 488 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
489 489
490 /* clear wb memory */ 490 /* clear wb memory */
491 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); 491 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
492 } 492 }
493 493
494 return 0; 494 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f7adbace428a..b76cd699eb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
421 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 421 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
422 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 422 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
423 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 423 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
424 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
424 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 425 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
425 426
426 {0, 0, 0} 427 {0, 0, 0}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 33b504bafb88..c5dec210d529 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3465,9 +3465,13 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3465 max_sclk = 75000; 3465 max_sclk = 75000;
3466 } 3466 }
3467 } else if (adev->asic_type == CHIP_OLAND) { 3467 } else if (adev->asic_type == CHIP_OLAND) {
3468 if ((adev->pdev->device == 0x6604) && 3468 if ((adev->pdev->revision == 0xC7) ||
3469 (adev->pdev->subsystem_vendor == 0x1028) && 3469 (adev->pdev->revision == 0x80) ||
3470 (adev->pdev->subsystem_device == 0x066F)) { 3470 (adev->pdev->revision == 0x81) ||
3471 (adev->pdev->revision == 0x83) ||
3472 (adev->pdev->revision == 0x87) ||
3473 (adev->pdev->device == 0x6604) ||
3474 (adev->pdev->device == 0x6605)) {
3471 max_sclk = 75000; 3475 max_sclk = 75000;
3472 } 3476 }
3473 } 3477 }
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6d4d9700734..324a688b3f30 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1260 * to KMS, hence fail if different settings are requested. 1260 * to KMS, hence fail if different settings are requested.
1261 */ 1261 */
1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 || 1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
1263 var->xres != fb->width || var->yres != fb->height || 1263 var->xres > fb->width || var->yres > fb->height ||
1264 var->xres_virtual != fb->width || var->yres_virtual != fb->height) { 1264 var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
1265 DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " 1265 DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", 1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
1267 var->xres, var->yres, var->bits_per_pixel, 1267 var->xres, var->yres, var->bits_per_pixel,
1268 var->xres_virtual, var->yres_virtual, 1268 var->xres_virtual, var->yres_virtual,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..b78d9239e48f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1311,15 +1311,15 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1311 goto out_pm_put; 1311 goto out_pm_put;
1312 } 1312 }
1313 1313
1314 mutex_lock(&gpu->lock);
1315
1314 fence = etnaviv_gpu_fence_alloc(gpu); 1316 fence = etnaviv_gpu_fence_alloc(gpu);
1315 if (!fence) { 1317 if (!fence) {
1316 event_free(gpu, event); 1318 event_free(gpu, event);
1317 ret = -ENOMEM; 1319 ret = -ENOMEM;
1318 goto out_pm_put; 1320 goto out_unlock;
1319 } 1321 }
1320 1322
1321 mutex_lock(&gpu->lock);
1322
1323 gpu->event[event].fence = fence; 1323 gpu->event[event].fence = fence;
1324 submit->fence = fence->seqno; 1324 submit->fence = fence->seqno;
1325 gpu->active_fence = submit->fence; 1325 gpu->active_fence = submit->fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1357 hangcheck_timer_reset(gpu); 1357 hangcheck_timer_reset(gpu);
1358 ret = 0; 1358 ret = 0;
1359 1359
1360out_unlock:
1360 mutex_unlock(&gpu->lock); 1361 mutex_unlock(&gpu->lock);
1361 1362
1362out_pm_put: 1363out_pm_put:
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 0fd6f7a18364..c0e8d3302292 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -68,6 +68,8 @@ struct decon_context {
68 unsigned long flags; 68 unsigned long flags;
69 unsigned long out_type; 69 unsigned long out_type;
70 int first_win; 70 int first_win;
71 spinlock_t vblank_lock;
72 u32 frame_id;
71}; 73};
72 74
73static const uint32_t decon_formats[] = { 75static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
103 if (ctx->out_type & IFTYPE_I80) 105 if (ctx->out_type & IFTYPE_I80)
104 val |= VIDINTCON0_FRAMEDONE; 106 val |= VIDINTCON0_FRAMEDONE;
105 else 107 else
106 val |= VIDINTCON0_INTFRMEN; 108 val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
107 109
108 writel(val, ctx->addr + DECON_VIDINTCON0); 110 writel(val, ctx->addr + DECON_VIDINTCON0);
109 } 111 }
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
122 writel(0, ctx->addr + DECON_VIDINTCON0); 124 writel(0, ctx->addr + DECON_VIDINTCON0);
123} 125}
124 126
127/* return number of starts/ends of frame transmissions since reset */
128static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
129{
130 u32 frm, pfrm, status, cnt = 2;
131
132 /* To get consistent result repeat read until frame id is stable.
133 * Usually the loop will be executed once, in rare cases when the loop
134 * is executed at frame change time 2nd pass will be needed.
135 */
136 frm = readl(ctx->addr + DECON_CRFMID);
137 do {
138 status = readl(ctx->addr + DECON_VIDCON1);
139 pfrm = frm;
140 frm = readl(ctx->addr + DECON_CRFMID);
141 } while (frm != pfrm && --cnt);
142
143 /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
144 * of RGB, it should be taken into account.
145 */
146 if (!frm)
147 return 0;
148
149 switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
150 case VIDCON1_VSTATUS_VS:
151 if (!(ctx->out_type & IFTYPE_I80))
152 --frm;
153 break;
154 case VIDCON1_VSTATUS_BP:
155 --frm;
156 break;
157 case VIDCON1_I80_ACTIVE:
158 case VIDCON1_VSTATUS_AC:
159 if (end)
160 --frm;
161 break;
162 default:
163 break;
164 }
165
166 return frm;
167}
168
125static void decon_setup_trigger(struct decon_context *ctx) 169static void decon_setup_trigger(struct decon_context *ctx)
126{ 170{
127 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) 171 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
128 return; 172 return;
129 173
130 if (!(ctx->out_type & I80_HW_TRG)) { 174 if (!(ctx->out_type & I80_HW_TRG)) {
131 writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN 175 writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
132 | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, 176 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
133 ctx->addr + DECON_TRIGCON); 177 ctx->addr + DECON_TRIGCON);
134 return; 178 return;
135 } 179 }
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
365static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 409static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
366{ 410{
367 struct decon_context *ctx = crtc->ctx; 411 struct decon_context *ctx = crtc->ctx;
412 unsigned long flags;
368 int i; 413 int i;
369 414
370 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 415 if (test_bit(BIT_SUSPENDED, &ctx->flags))
371 return; 416 return;
372 417
418 spin_lock_irqsave(&ctx->vblank_lock, flags);
419
373 for (i = ctx->first_win; i < WINDOWS_NR; i++) 420 for (i = ctx->first_win; i < WINDOWS_NR; i++)
374 decon_shadow_protect_win(ctx, i, false); 421 decon_shadow_protect_win(ctx, i, false);
375 422
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
378 425
379 if (ctx->out_type & IFTYPE_I80) 426 if (ctx->out_type & IFTYPE_I80)
380 set_bit(BIT_WIN_UPDATED, &ctx->flags); 427 set_bit(BIT_WIN_UPDATED, &ctx->flags);
428
429 ctx->frame_id = decon_get_frame_count(ctx, true);
430
431 exynos_crtc_handle_event(crtc);
432
433 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
381} 434}
382 435
383static void decon_swreset(struct decon_context *ctx) 436static void decon_swreset(struct decon_context *ctx)
384{ 437{
385 unsigned int tries; 438 unsigned int tries;
439 unsigned long flags;
386 440
387 writel(0, ctx->addr + DECON_VIDCON0); 441 writel(0, ctx->addr + DECON_VIDCON0);
388 for (tries = 2000; tries; --tries) { 442 for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
400 454
401 WARN(tries == 0, "failed to software reset DECON\n"); 455 WARN(tries == 0, "failed to software reset DECON\n");
402 456
457 spin_lock_irqsave(&ctx->vblank_lock, flags);
458 ctx->frame_id = 0;
459 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
460
403 if (!(ctx->out_type & IFTYPE_HDMI)) 461 if (!(ctx->out_type & IFTYPE_HDMI))
404 return; 462 return;
405 463
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
578 .unbind = decon_unbind, 636 .unbind = decon_unbind,
579}; 637};
580 638
639static void decon_handle_vblank(struct decon_context *ctx)
640{
641 u32 frm;
642
643 spin_lock(&ctx->vblank_lock);
644
645 frm = decon_get_frame_count(ctx, true);
646
647 if (frm != ctx->frame_id) {
648 /* handle only if incremented, take care of wrap-around */
649 if ((s32)(frm - ctx->frame_id) > 0)
650 drm_crtc_handle_vblank(&ctx->crtc->base);
651 ctx->frame_id = frm;
652 }
653
654 spin_unlock(&ctx->vblank_lock);
655}
656
581static irqreturn_t decon_irq_handler(int irq, void *dev_id) 657static irqreturn_t decon_irq_handler(int irq, void *dev_id)
582{ 658{
583 struct decon_context *ctx = dev_id; 659 struct decon_context *ctx = dev_id;
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
598 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) 674 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
599 return IRQ_HANDLED; 675 return IRQ_HANDLED;
600 } 676 }
601 drm_crtc_handle_vblank(&ctx->crtc->base); 677 decon_handle_vblank(ctx);
602 } 678 }
603 679
604out: 680out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
671 __set_bit(BIT_SUSPENDED, &ctx->flags); 747 __set_bit(BIT_SUSPENDED, &ctx->flags);
672 ctx->dev = dev; 748 ctx->dev = dev;
673 ctx->out_type = (unsigned long)of_device_get_match_data(dev); 749 ctx->out_type = (unsigned long)of_device_get_match_data(dev);
750 spin_lock_init(&ctx->vblank_lock);
674 751
675 if (ctx->out_type & IFTYPE_HDMI) { 752 if (ctx->out_type & IFTYPE_HDMI) {
676 ctx->first_win = 1; 753 ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
678 ctx->out_type |= IFTYPE_I80; 755 ctx->out_type |= IFTYPE_I80;
679 } 756 }
680 757
681 if (ctx->out_type | I80_HW_TRG) { 758 if (ctx->out_type & I80_HW_TRG) {
682 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 759 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
683 "samsung,disp-sysreg"); 760 "samsung,disp-sysreg");
684 if (IS_ERR(ctx->sysreg)) { 761 if (IS_ERR(ctx->sysreg)) {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f9ab19e205e2..48811806fa27 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
526 526
527 for (i = 0; i < WINDOWS_NR; i++) 527 for (i = 0; i < WINDOWS_NR; i++)
528 decon_shadow_protect_win(ctx, i, false); 528 decon_shadow_protect_win(ctx, i, false);
529 exynos_crtc_handle_event(crtc);
529} 530}
530 531
531static void decon_init(struct decon_context *ctx) 532static void decon_init(struct decon_context *ctx)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 5367b6664fe3..c65f4509932c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
85 struct drm_crtc_state *old_crtc_state) 85 struct drm_crtc_state *old_crtc_state)
86{ 86{
87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
88 struct drm_pending_vblank_event *event;
89 unsigned long flags;
90 88
91 if (exynos_crtc->ops->atomic_flush) 89 if (exynos_crtc->ops->atomic_flush)
92 exynos_crtc->ops->atomic_flush(exynos_crtc); 90 exynos_crtc->ops->atomic_flush(exynos_crtc);
91}
92
93static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
94 .enable = exynos_drm_crtc_enable,
95 .disable = exynos_drm_crtc_disable,
96 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
97 .atomic_check = exynos_crtc_atomic_check,
98 .atomic_begin = exynos_crtc_atomic_begin,
99 .atomic_flush = exynos_crtc_atomic_flush,
100};
101
102void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
103{
104 struct drm_crtc *crtc = &exynos_crtc->base;
105 struct drm_pending_vblank_event *event = crtc->state->event;
106 unsigned long flags;
93 107
94 event = crtc->state->event;
95 if (event) { 108 if (event) {
96 crtc->state->event = NULL; 109 crtc->state->event = NULL;
97
98 spin_lock_irqsave(&crtc->dev->event_lock, flags); 110 spin_lock_irqsave(&crtc->dev->event_lock, flags);
99 if (drm_crtc_vblank_get(crtc) == 0) 111 if (drm_crtc_vblank_get(crtc) == 0)
100 drm_crtc_arm_vblank_event(crtc, event); 112 drm_crtc_arm_vblank_event(crtc, event);
@@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
105 117
106} 118}
107 119
108static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
109 .enable = exynos_drm_crtc_enable,
110 .disable = exynos_drm_crtc_disable,
111 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
112 .atomic_check = exynos_crtc_atomic_check,
113 .atomic_begin = exynos_crtc_atomic_begin,
114 .atomic_flush = exynos_crtc_atomic_flush,
115};
116
117static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) 120static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
118{ 121{
119 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 122 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6a581a8af465..abd5d6ceac0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
40 */ 40 */
41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); 41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
42 42
43void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
44
43#endif 45#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 812e2ec0761d..d7ef26370e67 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -86,7 +86,7 @@
86#define DSIM_SYNC_INFORM (1 << 27) 86#define DSIM_SYNC_INFORM (1 << 27)
87#define DSIM_EOT_DISABLE (1 << 28) 87#define DSIM_EOT_DISABLE (1 << 28)
88#define DSIM_MFLUSH_VS (1 << 29) 88#define DSIM_MFLUSH_VS (1 << 29)
89/* This flag is valid only for exynos3250/3472/4415/5260/5430 */ 89/* This flag is valid only for exynos3250/3472/5260/5430 */
90#define DSIM_CLKLANE_STOP (1 << 30) 90#define DSIM_CLKLANE_STOP (1 << 30)
91 91
92/* DSIM_ESCMODE */ 92/* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
473 .reg_values = reg_values, 473 .reg_values = reg_values,
474}; 474};
475 475
476static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
477 .reg_ofs = exynos_reg_ofs,
478 .plltmr_reg = 0x58,
479 .has_clklane_stop = 1,
480 .num_clks = 2,
481 .max_freq = 1000,
482 .wait_for_reset = 1,
483 .num_bits_resol = 11,
484 .reg_values = reg_values,
485};
486
487static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 476static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
488 .reg_ofs = exynos_reg_ofs, 477 .reg_ofs = exynos_reg_ofs,
489 .plltmr_reg = 0x58, 478 .plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
521 .data = &exynos3_dsi_driver_data }, 510 .data = &exynos3_dsi_driver_data },
522 { .compatible = "samsung,exynos4210-mipi-dsi", 511 { .compatible = "samsung,exynos4210-mipi-dsi",
523 .data = &exynos4_dsi_driver_data }, 512 .data = &exynos4_dsi_driver_data },
524 { .compatible = "samsung,exynos4415-mipi-dsi",
525 .data = &exynos4415_dsi_driver_data },
526 { .compatible = "samsung,exynos5410-mipi-dsi", 513 { .compatible = "samsung,exynos5410-mipi-dsi",
527 .data = &exynos5_dsi_driver_data }, 514 .data = &exynos5_dsi_driver_data },
528 { .compatible = "samsung,exynos5422-mipi-dsi", 515 { .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
979 bool first = !xfer->tx_done; 966 bool first = !xfer->tx_done;
980 u32 reg; 967 u32 reg;
981 968
982 dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", 969 dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
983 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); 970 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
984 971
985 if (length > DSI_TX_FIFO_SIZE) 972 if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
1177 spin_unlock_irqrestore(&dsi->transfer_lock, flags); 1164 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
1178 1165
1179 dev_dbg(dsi->dev, 1166 dev_dbg(dsi->dev,
1180 "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", 1167 "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
1181 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, 1168 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
1182 xfer->rx_done); 1169 xfer->rx_done);
1183 1170
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1348 int te_gpio_irq; 1335 int te_gpio_irq;
1349 1336
1350 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); 1337 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
1338 if (dsi->te_gpio == -ENOENT)
1339 return 0;
1340
1351 if (!gpio_is_valid(dsi->te_gpio)) { 1341 if (!gpio_is_valid(dsi->te_gpio)) {
1352 dev_err(dsi->dev, "no te-gpios specified\n");
1353 ret = dsi->te_gpio; 1342 ret = dsi->te_gpio;
1343 dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
1354 goto out; 1344 goto out;
1355 } 1345 }
1356 1346
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 95871577015d..5b18b5c5fdf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
1695 goto err_put_clk; 1695 goto err_put_clk;
1696 } 1696 }
1697 1697
1698 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1698 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1699 1699
1700 spin_lock_init(&ctx->lock); 1700 spin_lock_init(&ctx->lock);
1701 platform_set_drvdata(pdev, ctx); 1701 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9fa444c6053..3f04d72c448d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -71,10 +71,10 @@
71#define TRIGCON 0x1A4 71#define TRIGCON 0x1A4
72#define TRGMODE_ENABLE (1 << 0) 72#define TRGMODE_ENABLE (1 << 0)
73#define SWTRGCMD_ENABLE (1 << 1) 73#define SWTRGCMD_ENABLE (1 << 1)
74/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */ 74/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */
75#define HWTRGEN_ENABLE (1 << 3) 75#define HWTRGEN_ENABLE (1 << 3)
76#define HWTRGMASK_ENABLE (1 << 4) 76#define HWTRGMASK_ENABLE (1 << 4)
77/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ 77/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
78#define HWTRIGEN_PER_ENABLE (1 << 31) 78#define HWTRIGEN_PER_ENABLE (1 << 31)
79 79
80/* display mode change control register except exynos4 */ 80/* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
138 .has_vtsel = 1, 138 .has_vtsel = 1,
139}; 139};
140 140
141static struct fimd_driver_data exynos4415_fimd_driver_data = {
142 .timing_base = 0x20000,
143 .lcdblk_offset = 0x210,
144 .lcdblk_vt_shift = 10,
145 .lcdblk_bypass_shift = 1,
146 .trg_type = I80_HW_TRG,
147 .has_shadowcon = 1,
148 .has_vidoutcon = 1,
149 .has_vtsel = 1,
150 .has_trigger_per_te = 1,
151};
152
153static struct fimd_driver_data exynos5_fimd_driver_data = { 141static struct fimd_driver_data exynos5_fimd_driver_data = {
154 .timing_base = 0x20000, 142 .timing_base = 0x20000,
155 .lcdblk_offset = 0x214, 143 .lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
210 .data = &exynos3_fimd_driver_data }, 198 .data = &exynos3_fimd_driver_data },
211 { .compatible = "samsung,exynos4210-fimd", 199 { .compatible = "samsung,exynos4210-fimd",
212 .data = &exynos4_fimd_driver_data }, 200 .data = &exynos4_fimd_driver_data },
213 { .compatible = "samsung,exynos4415-fimd",
214 .data = &exynos4415_fimd_driver_data },
215 { .compatible = "samsung,exynos5250-fimd", 201 { .compatible = "samsung,exynos5250-fimd",
216 .data = &exynos5_fimd_driver_data }, 202 .data = &exynos5_fimd_driver_data },
217 { .compatible = "samsung,exynos5420-fimd", 203 { .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
257 val |= VIDINTCON0_INT_FRAME; 243 val |= VIDINTCON0_INT_FRAME;
258 244
259 val &= ~VIDINTCON0_FRAMESEL0_MASK; 245 val &= ~VIDINTCON0_FRAMESEL0_MASK;
260 val |= VIDINTCON0_FRAMESEL0_VSYNC; 246 val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
261 val &= ~VIDINTCON0_FRAMESEL1_MASK; 247 val &= ~VIDINTCON0_FRAMESEL1_MASK;
262 val |= VIDINTCON0_FRAMESEL1_NONE; 248 val |= VIDINTCON0_FRAMESEL1_NONE;
263 } 249 }
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
723 709
724 for (i = 0; i < WINDOWS_NR; i++) 710 for (i = 0; i < WINDOWS_NR; i++)
725 fimd_shadow_protect_win(ctx, i, false); 711 fimd_shadow_protect_win(ctx, i, false);
712
713 exynos_crtc_handle_event(crtc);
726} 714}
727 715
728static void fimd_update_plane(struct exynos_drm_crtc *crtc, 716static void fimd_update_plane(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4c28f7ffcc4d..55a1579d11b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp); 221 DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index bef57987759d..0506b2b17ac1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 9c84ee76f18a..3edda18cc2d2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
392 property->prop_id, property->cmd, ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[%p]\n", m_node); 521 DRM_DEBUG_KMS("node[%pK]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
659 659
660 mutex_lock(&c_node->event_lock); 660 mutex_lock(&c_node->event_lock);
661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
662 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); 662 DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
663 663
664 /* 664 /*
665 * qbuf == NULL condition means all event deletion. 665 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
750 750
751 /* find memory node from memory list */ 751 /* find memory node from memory list */
752 list_for_each_entry(m_node, head, list) { 752 list_for_each_entry(m_node, head, list) {
753 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); 753 DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
754 754
755 /* compare buffer id */ 755 /* compare buffer id */
756 if (m_node->buf_id == qbuf->buf_id) 756 if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
767 struct exynos_drm_ipp_ops *ops = NULL; 767 struct exynos_drm_ipp_ops *ops = NULL;
768 int ret = 0; 768 int ret = 0;
769 769
770 DRM_DEBUG_KMS("node[%p]\n", m_node); 770 DRM_DEBUG_KMS("node[%pK]\n", m_node);
771 771
772 if (!m_node) { 772 if (!m_node) {
773 DRM_ERROR("invalid queue node.\n"); 773 DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1232 m_node = list_first_entry(head, 1232 m_node = list_first_entry(head,
1233 struct drm_exynos_ipp_mem_node, list); 1233 struct drm_exynos_ipp_mem_node, list);
1234 1234
1235 DRM_DEBUG_KMS("m_node[%p]\n", m_node); 1235 DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
1236 1236
1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1238 if (ret) { 1238 if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1601 } 1601 }
1602 ippdrv->prop_list.ipp_id = ret; 1602 ippdrv->prop_list.ipp_id = ret;
1603 1603
1604 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", 1604 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
1605 count++, ippdrv, ret); 1605 count++, ippdrv, ret);
1606 1606
1607 /* store parent device for node */ 1607 /* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1659 1659
1660 file_priv->ipp_dev = dev; 1660 file_priv->ipp_dev = dev;
1661 1661
1662 DRM_DEBUG_KMS("done priv[%p]\n", dev); 1662 DRM_DEBUG_KMS("done priv[%pK]\n", dev);
1663 1663
1664 return 0; 1664 return 0;
1665} 1665}
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1676 mutex_lock(&ippdrv->cmd_lock); 1676 mutex_lock(&ippdrv->cmd_lock);
1677 list_for_each_entry_safe(c_node, tc_node, 1677 list_for_each_entry_safe(c_node, tc_node,
1678 &ippdrv->cmd_list, list) { 1678 &ippdrv->cmd_list, list) {
1679 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", 1679 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
1680 count++, ippdrv); 1680 count++, ippdrv);
1681 1681
1682 if (c_node->filp == file) { 1682 if (c_node->filp == file) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 6591e406084c..79282a820ecc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
748 goto err_ippdrv_register; 748 goto err_ippdrv_register;
749 } 749 }
750 750
751 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); 751 DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
752 752
753 platform_set_drvdata(pdev, rot); 753 platform_set_drvdata(pdev, rot);
754 754
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 57fe514d5c5b..5d9a62a87eec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
170 .enable_vblank = vidi_enable_vblank, 170 .enable_vblank = vidi_enable_vblank,
171 .disable_vblank = vidi_disable_vblank, 171 .disable_vblank = vidi_disable_vblank,
172 .update_plane = vidi_update_plane, 172 .update_plane = vidi_update_plane,
173 .atomic_flush = exynos_crtc_handle_event,
173}; 174};
174 175
175static void vidi_fake_vblank_timer(unsigned long arg) 176static void vidi_fake_vblank_timer(unsigned long arg)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 72143ac10525..25edb635a197 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
1012 return; 1012 return;
1013 1013
1014 mixer_vsync_set_update(mixer_ctx, true); 1014 mixer_vsync_set_update(mixer_ctx, true);
1015 exynos_crtc_handle_event(crtc);
1015} 1016}
1016 1017
1017static void mixer_enable(struct exynos_drm_crtc *crtc) 1018static void mixer_enable(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caaca9751..325618d969fe 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
242 const char *item; 242 const char *item;
243 243
244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { 244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
245 gvt_err("Invalid vGPU creation params\n"); 245 gvt_vgpu_err("Invalid vGPU creation params\n");
246 return -EINVAL; 246 return -EINVAL;
247 } 247 }
248 248
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
285 return 0; 285 return 0;
286 286
287no_enough_resource: 287no_enough_resource:
288 gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); 288 gvt_vgpu_err("fail to allocate resource %s\n", item);
289 gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", 289 gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
290 vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), 290 BYTES_TO_MB(request), BYTES_TO_MB(avail),
291 BYTES_TO_MB(max), BYTES_TO_MB(taken)); 291 BYTES_TO_MB(max), BYTES_TO_MB(taken));
292 return -ENOSPC; 292 return -ENOSPC;
293} 293}
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
285{ 285{
286 int ret; 286 int ret;
287 287
288 if (vgpu->failsafe)
289 return 0;
290
291 if (WARN_ON(bytes > 4)) 288 if (WARN_ON(bytes > 4))
292 return -EINVAL; 289 return -EINVAL;
293 290
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b241c8..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
817 return ret; 817 return ret;
818} 818}
819 819
820static inline bool is_force_nonpriv_mmio(unsigned int offset)
821{
822 return (offset >= 0x24d0 && offset < 0x2500);
823}
824
825static int force_nonpriv_reg_handler(struct parser_exec_state *s,
826 unsigned int offset, unsigned int index)
827{
828 struct intel_gvt *gvt = s->vgpu->gvt;
829 unsigned int data = cmd_val(s, index + 1);
830
831 if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
832 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
833 offset, data);
834 return -EINVAL;
835 }
836 return 0;
837}
838
820static int cmd_reg_handler(struct parser_exec_state *s, 839static int cmd_reg_handler(struct parser_exec_state *s,
821 unsigned int offset, unsigned int index, char *cmd) 840 unsigned int offset, unsigned int index, char *cmd)
822{ 841{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
824 struct intel_gvt *gvt = vgpu->gvt; 843 struct intel_gvt *gvt = vgpu->gvt;
825 844
826 if (offset + 4 > gvt->device_info.mmio_size) { 845 if (offset + 4 > gvt->device_info.mmio_size) {
827 gvt_err("%s access to (%x) outside of MMIO range\n", 846 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
828 cmd, offset); 847 cmd, offset);
829 return -EINVAL; 848 return -EINVAL;
830 } 849 }
831 850
832 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { 851 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
833 gvt_err("vgpu%d: %s access to non-render register (%x)\n", 852 gvt_vgpu_err("%s access to non-render register (%x)\n",
834 s->vgpu->id, cmd, offset); 853 cmd, offset);
835 return 0; 854 return 0;
836 } 855 }
837 856
838 if (is_shadowed_mmio(offset)) { 857 if (is_shadowed_mmio(offset)) {
839 gvt_err("vgpu%d: found access of shadowed MMIO %x\n", 858 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
840 s->vgpu->id, offset);
841 return 0; 859 return 0;
842 } 860 }
843 861
862 if (is_force_nonpriv_mmio(offset) &&
863 force_nonpriv_reg_handler(s, offset, index))
864 return -EINVAL;
865
844 if (offset == i915_mmio_reg_offset(DERRMR) || 866 if (offset == i915_mmio_reg_offset(DERRMR) ||
845 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { 867 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
846 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ 868 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
1008 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); 1030 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1009 else if (post_sync == 1) { 1031 else if (post_sync == 1) {
1010 /* check ggtt*/ 1032 /* check ggtt*/
1011 if ((cmd_val(s, 2) & (1 << 2))) { 1033 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1012 gma = cmd_val(s, 2) & GENMASK(31, 3); 1034 gma = cmd_val(s, 2) & GENMASK(31, 3);
1013 if (gmadr_bytes == 8) 1035 if (gmadr_bytes == 8)
1014 gma |= (cmd_gma_hi(s, 3)) << 32; 1036 gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1129 struct mi_display_flip_command_info *info) 1151 struct mi_display_flip_command_info *info)
1130{ 1152{
1131 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1153 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1154 struct intel_vgpu *vgpu = s->vgpu;
1132 u32 dword0 = cmd_val(s, 0); 1155 u32 dword0 = cmd_val(s, 0);
1133 u32 dword1 = cmd_val(s, 1); 1156 u32 dword1 = cmd_val(s, 1);
1134 u32 dword2 = cmd_val(s, 2); 1157 u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1167 break; 1190 break;
1168 1191
1169 default: 1192 default:
1170 gvt_err("unknown plane code %d\n", plane); 1193 gvt_vgpu_err("unknown plane code %d\n", plane);
1171 return -EINVAL; 1194 return -EINVAL;
1172 } 1195 }
1173 1196
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
1274static int cmd_handler_mi_display_flip(struct parser_exec_state *s) 1297static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1275{ 1298{
1276 struct mi_display_flip_command_info info; 1299 struct mi_display_flip_command_info info;
1300 struct intel_vgpu *vgpu = s->vgpu;
1277 int ret; 1301 int ret;
1278 int i; 1302 int i;
1279 int len = cmd_length(s); 1303 int len = cmd_length(s);
1280 1304
1281 ret = decode_mi_display_flip(s, &info); 1305 ret = decode_mi_display_flip(s, &info);
1282 if (ret) { 1306 if (ret) {
1283 gvt_err("fail to decode MI display flip command\n"); 1307 gvt_vgpu_err("fail to decode MI display flip command\n");
1284 return ret; 1308 return ret;
1285 } 1309 }
1286 1310
1287 ret = check_mi_display_flip(s, &info); 1311 ret = check_mi_display_flip(s, &info);
1288 if (ret) { 1312 if (ret) {
1289 gvt_err("invalid MI display flip command\n"); 1313 gvt_vgpu_err("invalid MI display flip command\n");
1290 return ret; 1314 return ret;
1291 } 1315 }
1292 1316
1293 ret = update_plane_mmio_from_mi_display_flip(s, &info); 1317 ret = update_plane_mmio_from_mi_display_flip(s, &info);
1294 if (ret) { 1318 if (ret) {
1295 gvt_err("fail to update plane mmio\n"); 1319 gvt_vgpu_err("fail to update plane mmio\n");
1296 return ret; 1320 return ret;
1297 } 1321 }
1298 1322
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1350 int ret; 1374 int ret;
1351 1375
1352 if (op_size > max_surface_size) { 1376 if (op_size > max_surface_size) {
1353 gvt_err("command address audit fail name %s\n", s->info->name); 1377 gvt_vgpu_err("command address audit fail name %s\n",
1378 s->info->name);
1354 return -EINVAL; 1379 return -EINVAL;
1355 } 1380 }
1356 1381
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1367 } 1392 }
1368 return 0; 1393 return 0;
1369err: 1394err:
1370 gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", 1395 gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1371 s->info->name, guest_gma, op_size); 1396 s->info->name, guest_gma, op_size);
1372 1397
1373 pr_err("cmd dump: "); 1398 pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1412 1437
1413static inline int unexpected_cmd(struct parser_exec_state *s) 1438static inline int unexpected_cmd(struct parser_exec_state *s)
1414{ 1439{
1415 gvt_err("vgpu%d: Unexpected %s in command buffer!\n", 1440 struct intel_vgpu *vgpu = s->vgpu;
1416 s->vgpu->id, s->info->name); 1441
1442 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1443
1417 return -EINVAL; 1444 return -EINVAL;
1418} 1445}
1419 1446
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1516 while (gma != end_gma) { 1543 while (gma != end_gma) {
1517 gpa = intel_vgpu_gma_to_gpa(mm, gma); 1544 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1518 if (gpa == INTEL_GVT_INVALID_ADDR) { 1545 if (gpa == INTEL_GVT_INVALID_ADDR) {
1519 gvt_err("invalid gma address: %lx\n", gma); 1546 gvt_vgpu_err("invalid gma address: %lx\n", gma);
1520 return -EFAULT; 1547 return -EFAULT;
1521 } 1548 }
1522 1549
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1557 uint32_t bb_size = 0; 1584 uint32_t bb_size = 0;
1558 uint32_t cmd_len = 0; 1585 uint32_t cmd_len = 0;
1559 bool met_bb_end = false; 1586 bool met_bb_end = false;
1587 struct intel_vgpu *vgpu = s->vgpu;
1560 u32 cmd; 1588 u32 cmd;
1561 1589
1562 /* get the start gm address of the batch buffer */ 1590 /* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1565 1593
1566 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1594 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1567 if (info == NULL) { 1595 if (info == NULL) {
1568 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1596 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1569 cmd, get_opcode(cmd, s->ring_id)); 1597 cmd, get_opcode(cmd, s->ring_id));
1570 return -EINVAL; 1598 return -EINVAL;
1571 } 1599 }
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1574 gma, gma + 4, &cmd); 1602 gma, gma + 4, &cmd);
1575 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1603 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1576 if (info == NULL) { 1604 if (info == NULL) {
1577 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1605 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1578 cmd, get_opcode(cmd, s->ring_id)); 1606 cmd, get_opcode(cmd, s->ring_id));
1579 return -EINVAL; 1607 return -EINVAL;
1580 } 1608 }
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1599static int perform_bb_shadow(struct parser_exec_state *s) 1627static int perform_bb_shadow(struct parser_exec_state *s)
1600{ 1628{
1601 struct intel_shadow_bb_entry *entry_obj; 1629 struct intel_shadow_bb_entry *entry_obj;
1630 struct intel_vgpu *vgpu = s->vgpu;
1602 unsigned long gma = 0; 1631 unsigned long gma = 0;
1603 uint32_t bb_size; 1632 uint32_t bb_size;
1604 void *dst = NULL; 1633 void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1633 1662
1634 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); 1663 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1635 if (ret) { 1664 if (ret) {
1636 gvt_err("failed to set shadow batch to CPU\n"); 1665 gvt_vgpu_err("failed to set shadow batch to CPU\n");
1637 goto unmap_src; 1666 goto unmap_src;
1638 } 1667 }
1639 1668
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1645 gma, gma + bb_size, 1674 gma, gma + bb_size,
1646 dst); 1675 dst);
1647 if (ret) { 1676 if (ret) {
1648 gvt_err("fail to copy guest ring buffer\n"); 1677 gvt_vgpu_err("fail to copy guest ring buffer\n");
1649 goto unmap_src; 1678 goto unmap_src;
1650 } 1679 }
1651 1680
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1676{ 1705{
1677 bool second_level; 1706 bool second_level;
1678 int ret = 0; 1707 int ret = 0;
1708 struct intel_vgpu *vgpu = s->vgpu;
1679 1709
1680 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { 1710 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1681 gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 1711 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1682 return -EINVAL; 1712 return -EINVAL;
1683 } 1713 }
1684 1714
1685 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; 1715 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1686 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { 1716 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1687 gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); 1717 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1688 return -EINVAL; 1718 return -EINVAL;
1689 } 1719 }
1690 1720
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1702 if (batch_buffer_needs_scan(s)) { 1732 if (batch_buffer_needs_scan(s)) {
1703 ret = perform_bb_shadow(s); 1733 ret = perform_bb_shadow(s);
1704 if (ret < 0) 1734 if (ret < 0)
1705 gvt_err("invalid shadow batch buffer\n"); 1735 gvt_vgpu_err("invalid shadow batch buffer\n");
1706 } else { 1736 } else {
1707 /* emulate a batch buffer end to do return right */ 1737 /* emulate a batch buffer end to do return right */
1708 ret = cmd_handler_mi_batch_buffer_end(s); 1738 ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2429 int ret = 0; 2459 int ret = 0;
2430 cycles_t t0, t1, t2; 2460 cycles_t t0, t1, t2;
2431 struct parser_exec_state s_before_advance_custom; 2461 struct parser_exec_state s_before_advance_custom;
2462 struct intel_vgpu *vgpu = s->vgpu;
2432 2463
2433 t0 = get_cycles(); 2464 t0 = get_cycles();
2434 2465
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2436 2467
2437 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 2468 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2438 if (info == NULL) { 2469 if (info == NULL) {
2439 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 2470 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2440 cmd, get_opcode(cmd, s->ring_id)); 2471 cmd, get_opcode(cmd, s->ring_id));
2441 return -EINVAL; 2472 return -EINVAL;
2442 } 2473 }
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2452 if (info->handler) { 2483 if (info->handler) {
2453 ret = info->handler(s); 2484 ret = info->handler(s);
2454 if (ret < 0) { 2485 if (ret < 0) {
2455 gvt_err("%s handler error\n", info->name); 2486 gvt_vgpu_err("%s handler error\n", info->name);
2456 return ret; 2487 return ret;
2457 } 2488 }
2458 } 2489 }
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2463 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2494 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2464 ret = cmd_advance_default(s); 2495 ret = cmd_advance_default(s);
2465 if (ret) { 2496 if (ret) {
2466 gvt_err("%s IP advance error\n", info->name); 2497 gvt_vgpu_err("%s IP advance error\n", info->name);
2467 return ret; 2498 return ret;
2468 } 2499 }
2469 } 2500 }
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
2486 2517
2487 unsigned long gma_head, gma_tail, gma_bottom; 2518 unsigned long gma_head, gma_tail, gma_bottom;
2488 int ret = 0; 2519 int ret = 0;
2520 struct intel_vgpu *vgpu = s->vgpu;
2489 2521
2490 gma_head = rb_start + rb_head; 2522 gma_head = rb_start + rb_head;
2491 gma_tail = rb_start + rb_tail; 2523 gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
2497 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2529 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2498 if (!(s->ip_gma >= rb_start) || 2530 if (!(s->ip_gma >= rb_start) ||
2499 !(s->ip_gma < gma_bottom)) { 2531 !(s->ip_gma < gma_bottom)) {
2500 gvt_err("ip_gma %lx out of ring scope." 2532 gvt_vgpu_err("ip_gma %lx out of ring scope."
2501 "(base:0x%lx, bottom: 0x%lx)\n", 2533 "(base:0x%lx, bottom: 0x%lx)\n",
2502 s->ip_gma, rb_start, 2534 s->ip_gma, rb_start,
2503 gma_bottom); 2535 gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
2505 return -EINVAL; 2537 return -EINVAL;
2506 } 2538 }
2507 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { 2539 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2508 gvt_err("ip_gma %lx out of range." 2540 gvt_vgpu_err("ip_gma %lx out of range."
2509 "base 0x%lx head 0x%lx tail 0x%lx\n", 2541 "base 0x%lx head 0x%lx tail 0x%lx\n",
2510 s->ip_gma, rb_start, 2542 s->ip_gma, rb_start,
2511 rb_head, rb_tail); 2543 rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
2515 } 2547 }
2516 ret = cmd_parser_exec(s); 2548 ret = cmd_parser_exec(s);
2517 if (ret) { 2549 if (ret) {
2518 gvt_err("cmd parser error\n"); 2550 gvt_vgpu_err("cmd parser error\n");
2519 parser_exec_state_dump(s); 2551 parser_exec_state_dump(s);
2520 break; 2552 break;
2521 } 2553 }
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2639 gma_head, gma_top, 2671 gma_head, gma_top,
2640 workload->shadow_ring_buffer_va); 2672 workload->shadow_ring_buffer_va);
2641 if (ret) { 2673 if (ret) {
2642 gvt_err("fail to copy guest ring buffer\n"); 2674 gvt_vgpu_err("fail to copy guest ring buffer\n");
2643 return ret; 2675 return ret;
2644 } 2676 }
2645 copy_len = gma_top - gma_head; 2677 copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2651 gma_head, gma_tail, 2683 gma_head, gma_tail,
2652 workload->shadow_ring_buffer_va + copy_len); 2684 workload->shadow_ring_buffer_va + copy_len);
2653 if (ret) { 2685 if (ret) {
2654 gvt_err("fail to copy guest ring buffer\n"); 2686 gvt_vgpu_err("fail to copy guest ring buffer\n");
2655 return ret; 2687 return ret;
2656 } 2688 }
2657 ring->tail += workload->rb_len; 2689 ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2662int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) 2694int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2663{ 2695{
2664 int ret; 2696 int ret;
2697 struct intel_vgpu *vgpu = workload->vgpu;
2665 2698
2666 ret = shadow_workload_ring_buffer(workload); 2699 ret = shadow_workload_ring_buffer(workload);
2667 if (ret) { 2700 if (ret) {
2668 gvt_err("fail to shadow workload ring_buffer\n"); 2701 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2669 return ret; 2702 return ret;
2670 } 2703 }
2671 2704
2672 ret = scan_workload(workload); 2705 ret = scan_workload(workload);
2673 if (ret) { 2706 if (ret) {
2674 gvt_err("scan workload error\n"); 2707 gvt_vgpu_err("scan workload error\n");
2675 return ret; 2708 return ret;
2676 } 2709 }
2677 return 0; 2710 return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2681{ 2714{
2682 int ctx_size = wa_ctx->indirect_ctx.size; 2715 int ctx_size = wa_ctx->indirect_ctx.size;
2683 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 2716 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2717 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2684 struct drm_i915_gem_object *obj; 2718 struct drm_i915_gem_object *obj;
2685 int ret = 0; 2719 int ret = 0;
2686 void *map; 2720 void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2694 /* get the va of the shadow batch buffer */ 2728 /* get the va of the shadow batch buffer */
2695 map = i915_gem_object_pin_map(obj, I915_MAP_WB); 2729 map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2696 if (IS_ERR(map)) { 2730 if (IS_ERR(map)) {
2697 gvt_err("failed to vmap shadow indirect ctx\n"); 2731 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2698 ret = PTR_ERR(map); 2732 ret = PTR_ERR(map);
2699 goto put_obj; 2733 goto put_obj;
2700 } 2734 }
2701 2735
2702 ret = i915_gem_object_set_to_cpu_domain(obj, false); 2736 ret = i915_gem_object_set_to_cpu_domain(obj, false);
2703 if (ret) { 2737 if (ret) {
2704 gvt_err("failed to set shadow indirect ctx to CPU\n"); 2738 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2705 goto unmap_src; 2739 goto unmap_src;
2706 } 2740 }
2707 2741
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2710 guest_gma, guest_gma + ctx_size, 2744 guest_gma, guest_gma + ctx_size,
2711 map); 2745 map);
2712 if (ret) { 2746 if (ret) {
2713 gvt_err("fail to copy guest indirect ctx\n"); 2747 gvt_vgpu_err("fail to copy guest indirect ctx\n");
2714 goto unmap_src; 2748 goto unmap_src;
2715 } 2749 }
2716 2750
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2744int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2778int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2745{ 2779{
2746 int ret; 2780 int ret;
2781 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2747 2782
2748 if (wa_ctx->indirect_ctx.size == 0) 2783 if (wa_ctx->indirect_ctx.size == 0)
2749 return 0; 2784 return 0;
2750 2785
2751 ret = shadow_indirect_ctx(wa_ctx); 2786 ret = shadow_indirect_ctx(wa_ctx);
2752 if (ret) { 2787 if (ret) {
2753 gvt_err("fail to shadow indirect ctx\n"); 2788 gvt_vgpu_err("fail to shadow indirect ctx\n");
2754 return ret; 2789 return ret;
2755 } 2790 }
2756 2791
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2758 2793
2759 ret = scan_wa_ctx(wa_ctx); 2794 ret = scan_wa_ctx(wa_ctx);
2760 if (ret) { 2795 if (ret) {
2761 gvt_err("scan wa ctx error\n"); 2796 gvt_vgpu_err("scan wa ctx error\n");
2762 return ret; 2797 return ret;
2763 } 2798 }
2764 2799
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7bd980a..b0cff4dc2684 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
27#define gvt_err(fmt, args...) \ 27#define gvt_err(fmt, args...) \
28 DRM_ERROR("gvt: "fmt, ##args) 28 DRM_ERROR("gvt: "fmt, ##args)
29 29
30#define gvt_vgpu_err(fmt, args...) \
31do { \
32 if (IS_ERR_OR_NULL(vgpu)) \
33 DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
34 else \
35 DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
36} while (0)
37
30#define gvt_dbg_core(fmt, args...) \ 38#define gvt_dbg_core(fmt, args...) \
31 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) 39 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
32 40
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85dff7b2a..42cd09ec63fa 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
52 unsigned char chr = 0; 52 unsigned char chr = 0;
53 53
54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { 54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
55 gvt_err("Driver tries to read EDID without proper sequence!\n"); 55 gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
56 return 0; 56 return 0;
57 } 57 }
58 if (edid->current_edid_read >= EDID_SIZE) { 58 if (edid->current_edid_read >= EDID_SIZE) {
59 gvt_err("edid_get_byte() exceeds the size of EDID!\n"); 59 gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
60 return 0; 60 return 0;
61 } 61 }
62 62
63 if (!edid->edid_available) { 63 if (!edid->edid_available) {
64 gvt_err("Reading EDID but EDID is not available!\n"); 64 gvt_vgpu_err("Reading EDID but EDID is not available!\n");
65 return 0; 65 return 0;
66 } 66 }
67 67
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
72 chr = edid_data->edid_block[edid->current_edid_read]; 72 chr = edid_data->edid_block[edid->current_edid_read];
73 edid->current_edid_read++; 73 edid->current_edid_read++;
74 } else { 74 } else {
75 gvt_err("No EDID available during the reading?\n"); 75 gvt_vgpu_err("No EDID available during the reading?\n");
76 } 76 }
77 return chr; 77 return chr;
78} 78}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; 223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
224 break; 224 break;
225 default: 225 default:
226 gvt_err("Unknown/reserved GMBUS cycle detected!\n"); 226 gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
227 break; 227 break;
228 } 228 }
229 /* 229 /*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
292 */ 292 */
293 } else { 293 } else {
294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); 294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
295 gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", 295 gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
296 vgpu->id);
297 } 296 }
298 return 0; 297 return 0;
299} 298}
@@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
496 unsigned char val = edid_get_byte(vgpu); 495 unsigned char val = edid_get_byte(vgpu);
497 496
498 aux_data_for_write = (val << 16); 497 aux_data_for_write = (val << 16);
499 } 498 } else
499 aux_data_for_write = (0xff << 16);
500 } 500 }
501 /* write the return value in AUX_CH_DATA reg which includes: 501 /* write the return value in AUX_CH_DATA reg which includes:
502 * ACK of I2C_WRITE 502 * ACK of I2C_WRITE
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd3c03f..d186c157f65f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
172 struct intel_vgpu_execlist *execlist, 172 struct intel_vgpu_execlist *execlist,
173 struct execlist_ctx_descriptor_format *ctx) 173 struct execlist_ctx_descriptor_format *ctx)
174{ 174{
175 struct intel_vgpu *vgpu = execlist->vgpu;
175 struct intel_vgpu_execlist_slot *running = execlist->running_slot; 176 struct intel_vgpu_execlist_slot *running = execlist->running_slot;
176 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; 177 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
177 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; 178 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
183 gvt_dbg_el("schedule out context id %x\n", ctx->context_id); 184 gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
184 185
185 if (WARN_ON(!same_context(ctx, execlist->running_context))) { 186 if (WARN_ON(!same_context(ctx, execlist->running_context))) {
186 gvt_err("schedule out context is not running context," 187 gvt_vgpu_err("schedule out context is not running context,"
187 "ctx id %x running ctx id %x\n", 188 "ctx id %x running ctx id %x\n",
188 ctx->context_id, 189 ctx->context_id,
189 execlist->running_context->context_id); 190 execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
254 status.udw = vgpu_vreg(vgpu, status_reg + 4); 255 status.udw = vgpu_vreg(vgpu, status_reg + 4);
255 256
256 if (status.execlist_queue_full) { 257 if (status.execlist_queue_full) {
257 gvt_err("virtual execlist slots are full\n"); 258 gvt_vgpu_err("virtual execlist slots are full\n");
258 return NULL; 259 return NULL;
259 } 260 }
260 261
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
270 271
271 struct execlist_ctx_descriptor_format *ctx0, *ctx1; 272 struct execlist_ctx_descriptor_format *ctx0, *ctx1;
272 struct execlist_context_status_format status; 273 struct execlist_context_status_format status;
274 struct intel_vgpu *vgpu = execlist->vgpu;
273 275
274 gvt_dbg_el("emulate schedule-in\n"); 276 gvt_dbg_el("emulate schedule-in\n");
275 277
276 if (!slot) { 278 if (!slot) {
277 gvt_err("no available execlist slot\n"); 279 gvt_vgpu_err("no available execlist slot\n");
278 return -EINVAL; 280 return -EINVAL;
279 } 281 }
280 282
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
375 377
376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); 378 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
377 if (IS_ERR(vma)) { 379 if (IS_ERR(vma)) {
378 gvt_err("Cannot pin\n");
379 return; 380 return;
380 } 381 }
381 382
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
428 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 429 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
429 0, CACHELINE_BYTES, 0); 430 0, CACHELINE_BYTES, 0);
430 if (IS_ERR(vma)) { 431 if (IS_ERR(vma)) {
431 gvt_err("Cannot pin indirect ctx obj\n");
432 return; 432 return;
433 } 433 }
434 434
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
561{ 561{
562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; 562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
563 struct intel_vgpu_mm *mm; 563 struct intel_vgpu_mm *mm;
564 struct intel_vgpu *vgpu = workload->vgpu;
564 int page_table_level; 565 int page_table_level;
565 u32 pdp[8]; 566 u32 pdp[8];
566 567
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
569 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ 570 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
570 page_table_level = 4; 571 page_table_level = 4;
571 } else { 572 } else {
572 gvt_err("Advanced Context mode(SVM) is not supported!\n"); 573 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
573 return -EINVAL; 574 return -EINVAL;
574 } 575 }
575 576
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
583 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, 584 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
584 pdp, page_table_level, 0); 585 pdp, page_table_level, 0);
585 if (IS_ERR(mm)) { 586 if (IS_ERR(mm)) {
586 gvt_err("fail to create mm object.\n"); 587 gvt_vgpu_err("fail to create mm object.\n");
587 return PTR_ERR(mm); 588 return PTR_ERR(mm);
588 } 589 }
589 } 590 }
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
609 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, 610 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
610 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); 611 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
611 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { 612 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
612 gvt_err("invalid guest context LRCA: %x\n", desc->lrca); 613 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
613 return -EINVAL; 614 return -EINVAL;
614 } 615 }
615 616
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
724 continue; 725 continue;
725 726
726 if (!desc[i]->privilege_access) { 727 if (!desc[i]->privilege_access) {
727 gvt_err("vgpu%d: unexpected GGTT elsp submission\n", 728 gvt_vgpu_err("unexpected GGTT elsp submission\n");
728 vgpu->id);
729 return -EINVAL; 729 return -EINVAL;
730 } 730 }
731 731
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
735 } 735 }
736 736
737 if (!valid_desc_bitmap) { 737 if (!valid_desc_bitmap) {
738 gvt_err("vgpu%d: no valid desc in a elsp submission\n", 738 gvt_vgpu_err("no valid desc in a elsp submission\n");
739 vgpu->id);
740 return -EINVAL; 739 return -EINVAL;
741 } 740 }
742 741
743 if (!test_bit(0, (void *)&valid_desc_bitmap) && 742 if (!test_bit(0, (void *)&valid_desc_bitmap) &&
744 test_bit(1, (void *)&valid_desc_bitmap)) { 743 test_bit(1, (void *)&valid_desc_bitmap)) {
745 gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", 744 gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
746 vgpu->id);
747 return -EINVAL; 745 return -EINVAL;
748 } 746 }
749 747
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
752 ret = submit_context(vgpu, ring_id, &valid_desc[i], 750 ret = submit_context(vgpu, ring_id, &valid_desc[i],
753 emulate_schedule_in); 751 emulate_schedule_in);
754 if (ret) { 752 if (ret) {
755 gvt_err("vgpu%d: fail to schedule workload\n", 753 gvt_vgpu_err("fail to schedule workload\n");
756 vgpu->id);
757 return ret; 754 return ret;
758 } 755 }
759 emulate_schedule_in = false; 756 emulate_schedule_in = false;
@@ -778,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
778 _EL_OFFSET_STATUS_PTR); 775 _EL_OFFSET_STATUS_PTR);
779 776
780 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); 777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
781 ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; 778 ctx_status_ptr.read_ptr = 0;
779 ctx_status_ptr.write_ptr = 0x7;
782 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
783} 781}
784 782
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
75 struct gvt_firmware_header *h; 75 struct gvt_firmware_header *h;
76 void *firmware; 76 void *firmware;
77 void *p; 77 void *p;
78 unsigned long size; 78 unsigned long size, crc32_start;
79 int i; 79 int i;
80 int ret; 80 int ret;
81 81
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
83 firmware = vzalloc(size); 83 firmware = vzalloc(size);
84 if (!firmware) 84 if (!firmware)
85 return -ENOMEM; 85 return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
112 112
113 memcpy(gvt->firmware.mmio, p, info->mmio_size); 113 memcpy(gvt->firmware.mmio, p, info->mmio_size);
114 114
115 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
116 h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
117
115 firmware_attr.size = size; 118 firmware_attr.size = size;
116 firmware_attr.private = firmware; 119 firmware_attr.private = firmware;
117 120
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
234 237
235 firmware->mmio = mem; 238 firmware->mmio = mem;
236 239
237 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", 240 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
238 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, 241 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
239 pdev->revision); 242 pdev->revision);
240 243
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23ded90..b832bea64e03 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
49{ 49{
50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
52 gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", 52 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
53 vgpu->id, addr, size); 53 addr, size);
54 return false; 54 return false;
55 } 55 }
56 return true; 56 return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
430 430
431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); 431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
432 if (mfn == INTEL_GVT_INVALID_ADDR) { 432 if (mfn == INTEL_GVT_INVALID_ADDR) {
433 gvt_err("fail to translate gfn: 0x%lx\n", gfn); 433 gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
434 return -ENXIO; 434 return -ENXIO;
435 } 435 }
436 436
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
611 611
612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
613 if (dma_mapping_error(kdev, daddr)) { 613 if (dma_mapping_error(kdev, daddr)) {
614 gvt_err("fail to map dma addr\n"); 614 gvt_vgpu_err("fail to map dma addr\n");
615 return -EINVAL; 615 return -EINVAL;
616 } 616 }
617 617
@@ -735,7 +735,7 @@ retry:
735 if (reclaim_one_mm(vgpu->gvt)) 735 if (reclaim_one_mm(vgpu->gvt))
736 goto retry; 736 goto retry;
737 737
738 gvt_err("fail to allocate ppgtt shadow page\n"); 738 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
739 return ERR_PTR(-ENOMEM); 739 return ERR_PTR(-ENOMEM);
740 } 740 }
741 741
@@ -750,14 +750,14 @@ retry:
750 */ 750 */
751 ret = init_shadow_page(vgpu, &spt->shadow_page, type); 751 ret = init_shadow_page(vgpu, &spt->shadow_page, type);
752 if (ret) { 752 if (ret) {
753 gvt_err("fail to initialize shadow page for spt\n"); 753 gvt_vgpu_err("fail to initialize shadow page for spt\n");
754 goto err; 754 goto err;
755 } 755 }
756 756
757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, 757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
758 gfn, ppgtt_write_protection_handler, NULL); 758 gfn, ppgtt_write_protection_handler, NULL);
759 if (ret) { 759 if (ret) {
760 gvt_err("fail to initialize guest page for spt\n"); 760 gvt_vgpu_err("fail to initialize guest page for spt\n");
761 goto err; 761 goto err;
762 } 762 }
763 763
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
776 if (p) 776 if (p)
777 return shadow_page_to_ppgtt_spt(p); 777 return shadow_page_to_ppgtt_spt(p);
778 778
779 gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", 779 gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
780 vgpu->id, mfn);
781 return NULL; 780 return NULL;
782} 781}
783 782
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
827 } 826 }
828 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 827 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
829 if (!s) { 828 if (!s) {
830 gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", 829 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
831 vgpu->id, ops->get_pfn(e)); 830 ops->get_pfn(e));
832 return -ENXIO; 831 return -ENXIO;
833 } 832 }
834 return ppgtt_invalidate_shadow_page(s); 833 return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
836 835
837static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 836static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
838{ 837{
838 struct intel_vgpu *vgpu = spt->vgpu;
839 struct intel_gvt_gtt_entry e; 839 struct intel_gvt_gtt_entry e;
840 unsigned long index; 840 unsigned long index;
841 int ret; 841 int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
854 854
855 for_each_present_shadow_entry(spt, &e, index) { 855 for_each_present_shadow_entry(spt, &e, index) {
856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
857 gvt_err("GVT doesn't support pse bit for now\n"); 857 gvt_vgpu_err("GVT doesn't support pse bit for now\n");
858 return -EINVAL; 858 return -EINVAL;
859 } 859 }
860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry( 860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
868 ppgtt_free_shadow_page(spt); 868 ppgtt_free_shadow_page(spt);
869 return 0; 869 return 0;
870fail: 870fail:
871 gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", 871 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
872 spt->vgpu->id, spt, e.val64, e.type); 872 spt, e.val64, e.type);
873 return ret; 873 return ret;
874} 874}
875 875
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
914 } 914 }
915 return s; 915 return s;
916fail: 916fail:
917 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 917 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
918 vgpu->id, s, we->val64, we->type); 918 s, we->val64, we->type);
919 return ERR_PTR(ret); 919 return ERR_PTR(ret);
920} 920}
921 921
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
953 953
954 for_each_present_guest_entry(spt, &ge, i) { 954 for_each_present_guest_entry(spt, &ge, i) {
955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { 955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
956 gvt_err("GVT doesn't support pse bit now\n"); 956 gvt_vgpu_err("GVT doesn't support pse bit now\n");
957 ret = -EINVAL; 957 ret = -EINVAL;
958 goto fail; 958 goto fail;
959 } 959 }
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
969 } 969 }
970 return 0; 970 return 0;
971fail: 971fail:
972 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 972 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
973 vgpu->id, spt, ge.val64, ge.type); 973 spt, ge.val64, ge.type);
974 return ret; 974 return ret;
975} 975}
976 976
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
999 struct intel_vgpu_ppgtt_spt *s = 999 struct intel_vgpu_ppgtt_spt *s =
1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); 1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
1001 if (!s) { 1001 if (!s) {
1002 gvt_err("fail to find guest page\n"); 1002 gvt_vgpu_err("fail to find guest page\n");
1003 ret = -ENXIO; 1003 ret = -ENXIO;
1004 goto fail; 1004 goto fail;
1005 } 1005 }
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
1011 ppgtt_set_shadow_entry(spt, &e, index); 1011 ppgtt_set_shadow_entry(spt, &e, index);
1012 return 0; 1012 return 0;
1013fail: 1013fail:
1014 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 1014 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1015 vgpu->id, spt, e.val64, e.type); 1015 spt, e.val64, e.type);
1016 return ret; 1016 return ret;
1017} 1017}
1018 1018
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
1046 } 1046 }
1047 return 0; 1047 return 0;
1048fail: 1048fail:
1049 gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, 1049 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1050 spt, we->val64, we->type); 1050 spt, we->val64, we->type);
1051 return ret; 1051 return ret;
1052} 1052}
1053 1053
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
1250 } 1250 }
1251 return 0; 1251 return 0;
1252fail: 1252fail:
1253 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", 1253 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1254 vgpu->id, spt, we->val64, we->type); 1254 spt, we->val64, we->type);
1255 return ret; 1255 return ret;
1256} 1256}
1257 1257
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
1493 1493
1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1495 if (IS_ERR(spt)) { 1495 if (IS_ERR(spt)) {
1496 gvt_err("fail to populate guest root pointer\n"); 1496 gvt_vgpu_err("fail to populate guest root pointer\n");
1497 ret = PTR_ERR(spt); 1497 ret = PTR_ERR(spt);
1498 goto fail; 1498 goto fail;
1499 } 1499 }
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1566 1566
1567 ret = gtt->mm_alloc_page_table(mm); 1567 ret = gtt->mm_alloc_page_table(mm);
1568 if (ret) { 1568 if (ret) {
1569 gvt_err("fail to allocate page table for mm\n"); 1569 gvt_vgpu_err("fail to allocate page table for mm\n");
1570 goto fail; 1570 goto fail;
1571 } 1571 }
1572 1572
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1584 } 1584 }
1585 return mm; 1585 return mm;
1586fail: 1586fail:
1587 gvt_err("fail to create mm\n"); 1587 gvt_vgpu_err("fail to create mm\n");
1588 if (mm) 1588 if (mm)
1589 intel_gvt_mm_unreference(mm); 1589 intel_gvt_mm_unreference(mm);
1590 return ERR_PTR(ret); 1590 return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1760 mm->page_table_level, gma, gpa); 1760 mm->page_table_level, gma, gpa);
1761 return gpa; 1761 return gpa;
1762err: 1762err:
1763 gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1763 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
1764 return INTEL_GVT_INVALID_ADDR; 1764 return INTEL_GVT_INVALID_ADDR;
1765} 1765}
1766 1766
@@ -1836,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1836 if (ops->test_present(&e)) { 1836 if (ops->test_present(&e)) {
1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1837 ret = gtt_entry_p2m(vgpu, &e, &m);
1838 if (ret) { 1838 if (ret) {
1839 gvt_err("vgpu%d: fail to translate guest gtt entry\n", 1839 gvt_vgpu_err("fail to translate guest gtt entry\n");
1840 vgpu->id); 1840 /* guest driver may read/write the entry when partial
1841 return ret; 1841 * update the entry in this situation p2m will fail
1842 * settting the shadow entry to point to a scratch page
1843 */
1844 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1842 } 1845 }
1843 } else { 1846 } else {
1844 m = e; 1847 m = e;
1845 m.val64 = 0; 1848 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1846 } 1849 }
1847 1850
1848 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1893,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1893 1896
1894 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1897 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1895 if (!scratch_pt) { 1898 if (!scratch_pt) {
1896 gvt_err("fail to allocate scratch page\n"); 1899 gvt_vgpu_err("fail to allocate scratch page\n");
1897 return -ENOMEM; 1900 return -ENOMEM;
1898 } 1901 }
1899 1902
1900 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1903 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
1901 4096, PCI_DMA_BIDIRECTIONAL); 1904 4096, PCI_DMA_BIDIRECTIONAL);
1902 if (dma_mapping_error(dev, daddr)) { 1905 if (dma_mapping_error(dev, daddr)) {
1903 gvt_err("fail to dmamap scratch_pt\n"); 1906 gvt_vgpu_err("fail to dmamap scratch_pt\n");
1904 __free_page(virt_to_page(scratch_pt)); 1907 __free_page(virt_to_page(scratch_pt));
1905 return -ENOMEM; 1908 return -ENOMEM;
1906 } 1909 }
@@ -2003,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2003 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 2006 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2004 NULL, 1, 0); 2007 NULL, 1, 0);
2005 if (IS_ERR(ggtt_mm)) { 2008 if (IS_ERR(ggtt_mm)) {
2006 gvt_err("fail to create mm for ggtt.\n"); 2009 gvt_vgpu_err("fail to create mm for ggtt.\n");
2007 return PTR_ERR(ggtt_mm); 2010 return PTR_ERR(ggtt_mm);
2008 } 2011 }
2009 2012
@@ -2076,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
2076 for (i = 0; i < preallocated_oos_pages; i++) { 2079 for (i = 0; i < preallocated_oos_pages; i++) {
2077 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2080 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2078 if (!oos_page) { 2081 if (!oos_page) {
2079 gvt_err("fail to pre-allocate oos page\n");
2080 ret = -ENOMEM; 2082 ret = -ENOMEM;
2081 goto fail; 2083 goto fail;
2082 } 2084 }
@@ -2166,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2166 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, 2168 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2167 pdp, page_table_level, 0); 2169 pdp, page_table_level, 0);
2168 if (IS_ERR(mm)) { 2170 if (IS_ERR(mm)) {
2169 gvt_err("fail to create mm\n"); 2171 gvt_vgpu_err("fail to create mm\n");
2170 return PTR_ERR(mm); 2172 return PTR_ERR(mm);
2171 } 2173 }
2172 } 2174 }
@@ -2196,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2196 2198
2197 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2199 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2198 if (!mm) { 2200 if (!mm) {
2199 gvt_err("fail to find ppgtt instance.\n"); 2201 gvt_vgpu_err("fail to find ppgtt instance.\n");
2200 return -EINVAL; 2202 return -EINVAL;
2201 } 2203 }
2202 intel_gvt_mm_unreference(mm); 2204 intel_gvt_mm_unreference(mm);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..ef3baa0c4754 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
52 .vgpu_create = intel_gvt_create_vgpu, 52 .vgpu_create = intel_gvt_create_vgpu,
53 .vgpu_destroy = intel_gvt_destroy_vgpu, 53 .vgpu_destroy = intel_gvt_destroy_vgpu,
54 .vgpu_reset = intel_gvt_reset_vgpu, 54 .vgpu_reset = intel_gvt_reset_vgpu,
55 .vgpu_activate = intel_gvt_activate_vgpu,
56 .vgpu_deactivate = intel_gvt_deactivate_vgpu,
55}; 57};
56 58
57/** 59/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..becae2fa3b29 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
162 atomic_t running_workload_num; 162 atomic_t running_workload_num;
163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
164 struct i915_gem_context *shadow_ctx; 164 struct i915_gem_context *shadow_ctx;
165 struct notifier_block shadow_ctx_notifier_block;
166 165
167#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) 166#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
168 struct { 167 struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
233 struct intel_gvt_gtt gtt; 232 struct intel_gvt_gtt gtt;
234 struct intel_gvt_opregion opregion; 233 struct intel_gvt_opregion opregion;
235 struct intel_gvt_workload_scheduler scheduler; 234 struct intel_gvt_workload_scheduler scheduler;
235 struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); 236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
237 struct intel_vgpu_type *types; 237 struct intel_vgpu_type *types;
238 unsigned int num_types; 238 unsigned int num_types;
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
383 unsigned int engine_mask); 383 unsigned int engine_mask);
384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
385 385void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
386void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
386 387
387/* validating GM functions */ 388/* validating GM functions */
388#define vgpu_gmadr_is_aperture(vgpu, gmadr) \ 389#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
449 struct intel_vgpu_type *); 450 struct intel_vgpu_type *);
450 void (*vgpu_destroy)(struct intel_vgpu *); 451 void (*vgpu_destroy)(struct intel_vgpu *);
451 void (*vgpu_reset)(struct intel_vgpu *); 452 void (*vgpu_reset)(struct intel_vgpu *);
453 void (*vgpu_activate)(struct intel_vgpu *);
454 void (*vgpu_deactivate)(struct intel_vgpu *);
452}; 455};
453 456
454 457
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8e43395c748a..6da9ae1618e3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
181 GVT_FAILSAFE_UNSUPPORTED_GUEST); 181 GVT_FAILSAFE_UNSUPPORTED_GUEST);
182 182
183 if (!vgpu->mmio.disable_warn_untrack) { 183 if (!vgpu->mmio.disable_warn_untrack) {
184 gvt_err("vgpu%d: found oob fence register access\n", 184 gvt_vgpu_err("found oob fence register access\n");
185 vgpu->id); 185 gvt_vgpu_err("total fence %d, access fence %d\n",
186 gvt_err("vgpu%d: total fence %d, access fence %d\n", 186 vgpu_fence_sz(vgpu), fence_num);
187 vgpu->id, vgpu_fence_sz(vgpu),
188 fence_num);
189 } 187 }
190 memset(p_data, 0, bytes); 188 memset(p_data, 0, bytes);
191 return -EINVAL; 189 return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
249 break; 247 break;
250 default: 248 default:
251 /*should not hit here*/ 249 /*should not hit here*/
252 gvt_err("invalid forcewake offset 0x%x\n", offset); 250 gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
253 return -EINVAL; 251 return -EINVAL;
254 } 252 }
255 } else { 253 } else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
530 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; 528 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
531 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; 529 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
532 } else { 530 } else {
533 gvt_err("Invalid train pattern %d\n", train_pattern); 531 gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
534 return -EINVAL; 532 return -EINVAL;
535 } 533 }
536 534
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
588 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) 586 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
589 index = FDI_RX_IMR_TO_PIPE(offset); 587 index = FDI_RX_IMR_TO_PIPE(offset);
590 else { 588 else {
591 gvt_err("Unsupport registers %x\n", offset); 589 gvt_vgpu_err("Unsupport registers %x\n", offset);
592 return -EINVAL; 590 return -EINVAL;
593 } 591 }
594 592
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
818 u32 data; 816 u32 data;
819 817
820 if (!dpy_is_valid_port(port_index)) { 818 if (!dpy_is_valid_port(port_index)) {
821 gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); 819 gvt_vgpu_err("Unsupported DP port access!\n");
822 return 0; 820 return 0;
823 } 821 }
824 822
@@ -972,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
972 return 0; 970 return 0;
973} 971}
974 972
973static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
974 void *p_data, unsigned int bytes)
975{
976 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
977 write_vreg(vgpu, offset, p_data, bytes);
978 return 0;
979}
980
975static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 981static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
976 void *p_data, unsigned int bytes) 982 void *p_data, unsigned int bytes)
977{ 983{
@@ -1016,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1016 1022
1017 if (i == num) { 1023 if (i == num) {
1018 if (num == SBI_REG_MAX) { 1024 if (num == SBI_REG_MAX) {
1019 gvt_err("vgpu%d: SBI caching meets maximum limits\n", 1025 gvt_vgpu_err("SBI caching meets maximum limits\n");
1020 vgpu->id);
1021 return; 1026 return;
1022 } 1027 }
1023 display->sbi.number++; 1028 display->sbi.number++;
@@ -1097,7 +1102,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1097 break; 1102 break;
1098 } 1103 }
1099 if (invalid_read) 1104 if (invalid_read)
1100 gvt_err("invalid pvinfo read: [%x:%x] = %x\n", 1105 gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1101 offset, bytes, *(u32 *)p_data); 1106 offset, bytes, *(u32 *)p_data);
1102 vgpu->pv_notified = true; 1107 vgpu->pv_notified = true;
1103 return 0; 1108 return 0;
@@ -1125,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1125 case 1: /* Remove this in guest driver. */ 1130 case 1: /* Remove this in guest driver. */
1126 break; 1131 break;
1127 default: 1132 default:
1128 gvt_err("Invalid PV notification %d\n", notification); 1133 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1129 } 1134 }
1130 return ret; 1135 return ret;
1131} 1136}
@@ -1181,7 +1186,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1181 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); 1186 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1182 break; 1187 break;
1183 default: 1188 default:
1184 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", 1189 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1185 offset, bytes, data); 1190 offset, bytes, data);
1186 break; 1191 break;
1187 } 1192 }
@@ -1415,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1415 if (execlist->elsp_dwords.index == 3) { 1420 if (execlist->elsp_dwords.index == 3) {
1416 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1421 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1417 if(ret) 1422 if(ret)
1418 gvt_err("fail submit workload on ring %d\n", ring_id); 1423 gvt_vgpu_err("fail submit workload on ring %d\n",
1424 ring_id);
1419 } 1425 }
1420 1426
1421 ++execlist->elsp_dwords.index; 1427 ++execlist->elsp_dwords.index;
@@ -2240,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2240 MMIO_D(0x7180, D_ALL); 2246 MMIO_D(0x7180, D_ALL);
2241 MMIO_D(0x7408, D_ALL); 2247 MMIO_D(0x7408, D_ALL);
2242 MMIO_D(0x7c00, D_ALL); 2248 MMIO_D(0x7c00, D_ALL);
2243 MMIO_D(GEN6_MBCTL, D_ALL); 2249 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2244 MMIO_D(0x911c, D_ALL); 2250 MMIO_D(0x911c, D_ALL);
2245 MMIO_D(0x9120, D_ALL); 2251 MMIO_D(0x9120, D_ALL);
2246 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); 2252 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2988,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2988 write_vreg(vgpu, offset, p_data, bytes); 2994 write_vreg(vgpu, offset, p_data, bytes);
2989 return 0; 2995 return 0;
2990} 2996}
2997
2998/**
2999 * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
3000 * force-nopriv register
3001 *
3002 * @gvt: a GVT device
3003 * @offset: register offset
3004 *
3005 * Returns:
3006 * True if the register is in force-nonpriv whitelist;
3007 * False if outside;
3008 */
3009bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3010 unsigned int offset)
3011{
3012 return in_whitelist(offset);
3013}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 84d801638ede..e466259034e2 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
426 426
427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) 427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
428{ 428{
429 struct intel_vgpu *vgpu; 429 struct intel_vgpu *vgpu = NULL;
430 struct intel_vgpu_type *type; 430 struct intel_vgpu_type *type;
431 struct device *pdev; 431 struct device *pdev;
432 void *gvt; 432 void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
437 437
438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
439 if (!type) { 439 if (!type) {
440 gvt_err("failed to find type %s to create\n", 440 gvt_vgpu_err("failed to find type %s to create\n",
441 kobject_name(kobj)); 441 kobject_name(kobj));
442 ret = -EINVAL; 442 ret = -EINVAL;
443 goto out; 443 goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
446 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 446 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
447 if (IS_ERR_OR_NULL(vgpu)) { 447 if (IS_ERR_OR_NULL(vgpu)) {
448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
449 gvt_err("failed to create intel vgpu: %d\n", ret); 449 gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
450 goto out; 450 goto out;
451 } 451 }
452 452
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, 526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
527 &vgpu->vdev.iommu_notifier); 527 &vgpu->vdev.iommu_notifier);
528 if (ret != 0) { 528 if (ret != 0) {
529 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 529 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
530 ret);
530 goto out; 531 goto out;
531 } 532 }
532 533
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
534 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, 535 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
535 &vgpu->vdev.group_notifier); 536 &vgpu->vdev.group_notifier);
536 if (ret != 0) { 537 if (ret != 0) {
537 gvt_err("vfio_register_notifier for group failed: %d\n", ret); 538 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
539 ret);
538 goto undo_iommu; 540 goto undo_iommu;
539 } 541 }
540 542
@@ -542,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
542 if (ret) 544 if (ret)
543 goto undo_group; 545 goto undo_group;
544 546
547 intel_gvt_ops->vgpu_activate(vgpu);
548
545 atomic_set(&vgpu->vdev.released, 0); 549 atomic_set(&vgpu->vdev.released, 0);
546 return ret; 550 return ret;
547 551
@@ -567,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
567 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) 571 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
568 return; 572 return;
569 573
574 intel_gvt_ops->vgpu_deactivate(vgpu);
575
570 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, 576 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
571 &vgpu->vdev.iommu_notifier); 577 &vgpu->vdev.iommu_notifier);
572 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); 578 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -635,7 +641,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
635 641
636 642
637 if (index >= VFIO_PCI_NUM_REGIONS) { 643 if (index >= VFIO_PCI_NUM_REGIONS) {
638 gvt_err("invalid index: %u\n", index); 644 gvt_vgpu_err("invalid index: %u\n", index);
639 return -EINVAL; 645 return -EINVAL;
640 } 646 }
641 647
@@ -669,7 +675,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
669 case VFIO_PCI_VGA_REGION_INDEX: 675 case VFIO_PCI_VGA_REGION_INDEX:
670 case VFIO_PCI_ROM_REGION_INDEX: 676 case VFIO_PCI_ROM_REGION_INDEX:
671 default: 677 default:
672 gvt_err("unsupported region: %u\n", index); 678 gvt_vgpu_err("unsupported region: %u\n", index);
673 } 679 }
674 680
675 return ret == 0 ? count : ret; 681 return ret == 0 ? count : ret;
@@ -861,7 +867,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
861 867
862 trigger = eventfd_ctx_fdget(fd); 868 trigger = eventfd_ctx_fdget(fd);
863 if (IS_ERR(trigger)) { 869 if (IS_ERR(trigger)) {
864 gvt_err("eventfd_ctx_fdget failed\n"); 870 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
865 return PTR_ERR(trigger); 871 return PTR_ERR(trigger);
866 } 872 }
867 vgpu->vdev.msi_trigger = trigger; 873 vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1126,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1120 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, 1126 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1121 VFIO_PCI_NUM_IRQS, &data_size); 1127 VFIO_PCI_NUM_IRQS, &data_size);
1122 if (ret) { 1128 if (ret) {
1123 gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1129 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1124 return -EINVAL; 1130 return -EINVAL;
1125 } 1131 }
1126 if (data_size) { 1132 if (data_size) {
@@ -1310,7 +1316,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1310 1316
1311 kvm = vgpu->vdev.kvm; 1317 kvm = vgpu->vdev.kvm;
1312 if (!kvm || kvm->mm != current->mm) { 1318 if (!kvm || kvm->mm != current->mm) {
1313 gvt_err("KVM is required to use Intel vGPU\n"); 1319 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1314 return -ESRCH; 1320 return -ESRCH;
1315 } 1321 }
1316 1322
@@ -1324,6 +1330,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1324 vgpu->handle = (unsigned long)info; 1330 vgpu->handle = (unsigned long)info;
1325 info->vgpu = vgpu; 1331 info->vgpu = vgpu;
1326 info->kvm = kvm; 1332 info->kvm = kvm;
1333 kvm_get_kvm(info->kvm);
1327 1334
1328 kvmgt_protect_table_init(info); 1335 kvmgt_protect_table_init(info);
1329 gvt_cache_init(vgpu); 1336 gvt_cache_init(vgpu);
@@ -1337,12 +1344,8 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1337 1344
1338static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1345static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1339{ 1346{
1340 if (!info) {
1341 gvt_err("kvmgt_guest_info invalid\n");
1342 return false;
1343 }
1344
1345 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1347 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1348 kvm_put_kvm(info->kvm);
1346 kvmgt_protect_table_destroy(info); 1349 kvmgt_protect_table_destroy(info);
1347 gvt_cache_destroy(info->vgpu); 1350 gvt_cache_destroy(info->vgpu);
1348 vfree(info); 1351 vfree(info);
@@ -1383,12 +1386,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1383 unsigned long iova, pfn; 1386 unsigned long iova, pfn;
1384 struct kvmgt_guest_info *info; 1387 struct kvmgt_guest_info *info;
1385 struct device *dev; 1388 struct device *dev;
1389 struct intel_vgpu *vgpu;
1386 int rc; 1390 int rc;
1387 1391
1388 if (!handle_valid(handle)) 1392 if (!handle_valid(handle))
1389 return INTEL_GVT_INVALID_ADDR; 1393 return INTEL_GVT_INVALID_ADDR;
1390 1394
1391 info = (struct kvmgt_guest_info *)handle; 1395 info = (struct kvmgt_guest_info *)handle;
1396 vgpu = info->vgpu;
1392 iova = gvt_cache_find(info->vgpu, gfn); 1397 iova = gvt_cache_find(info->vgpu, gfn);
1393 if (iova != INTEL_GVT_INVALID_ADDR) 1398 if (iova != INTEL_GVT_INVALID_ADDR)
1394 return iova; 1399 return iova;
@@ -1397,13 +1402,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1397 dev = mdev_dev(info->vgpu->vdev.mdev); 1402 dev = mdev_dev(info->vgpu->vdev.mdev);
1398 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1403 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1399 if (rc != 1) { 1404 if (rc != 1) {
1400 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1405 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
1406 gfn, rc);
1401 return INTEL_GVT_INVALID_ADDR; 1407 return INTEL_GVT_INVALID_ADDR;
1402 } 1408 }
1403 /* transfer to host iova for GFX to use DMA */ 1409 /* transfer to host iova for GFX to use DMA */
1404 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1410 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
1405 if (rc) { 1411 if (rc) {
1406 gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1412 gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
1407 vfio_unpin_pages(dev, &gfn, 1); 1413 vfio_unpin_pages(dev, &gfn, 1);
1408 return INTEL_GVT_INVALID_ADDR; 1414 return INTEL_GVT_INVALID_ADDR;
1409 } 1415 }
@@ -1417,7 +1423,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1417{ 1423{
1418 struct kvmgt_guest_info *info; 1424 struct kvmgt_guest_info *info;
1419 struct kvm *kvm; 1425 struct kvm *kvm;
1420 int ret; 1426 int idx, ret;
1421 bool kthread = current->mm == NULL; 1427 bool kthread = current->mm == NULL;
1422 1428
1423 if (!handle_valid(handle)) 1429 if (!handle_valid(handle))
@@ -1429,8 +1435,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1429 if (kthread) 1435 if (kthread)
1430 use_mm(kvm->mm); 1436 use_mm(kvm->mm);
1431 1437
1438 idx = srcu_read_lock(&kvm->srcu);
1432 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1439 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1433 kvm_read_guest(kvm, gpa, buf, len); 1440 kvm_read_guest(kvm, gpa, buf, len);
1441 srcu_read_unlock(&kvm->srcu, idx);
1434 1442
1435 if (kthread) 1443 if (kthread)
1436 unuse_mm(kvm->mm); 1444 unuse_mm(kvm->mm);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 60b698cb8365..1ba3bdb09341 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, 142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
143 p_data, bytes); 143 p_data, bytes);
144 if (ret) { 144 if (ret) {
145 gvt_err("vgpu%d: guest page read error %d, " 145 gvt_vgpu_err("guest page read error %d, "
146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
147 vgpu->id, ret, 147 ret, gp->gfn, pa, *(u32 *)p_data,
148 gp->gfn, pa, *(u32 *)p_data, bytes); 148 bytes);
149 } 149 }
150 mutex_unlock(&gvt->lock); 150 mutex_unlock(&gvt->lock);
151 return ret; 151 return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
201 201
202 if (!vgpu->mmio.disable_warn_untrack) { 202 if (!vgpu->mmio.disable_warn_untrack) {
203 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 203 gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
204 vgpu->id, offset, bytes, *(u32 *)p_data); 204 offset, bytes, *(u32 *)p_data);
205 205
206 if (offset == 0x206c) { 206 if (offset == 0x206c) {
207 gvt_err("------------------------------------------\n"); 207 gvt_vgpu_err("------------------------------------------\n");
208 gvt_err("vgpu%d: likely triggers a gfx reset\n", 208 gvt_vgpu_err("likely triggers a gfx reset\n");
209 vgpu->id); 209 gvt_vgpu_err("------------------------------------------\n");
210 gvt_err("------------------------------------------\n");
211 vgpu->mmio.disable_warn_untrack = true; 210 vgpu->mmio.disable_warn_untrack = true;
212 } 211 }
213 } 212 }
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
220 mutex_unlock(&gvt->lock); 219 mutex_unlock(&gvt->lock);
221 return 0; 220 return 0;
222err: 221err:
223 gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", 222 gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
224 vgpu->id, offset, bytes); 223 offset, bytes);
225 mutex_unlock(&gvt->lock); 224 mutex_unlock(&gvt->lock);
226 return ret; 225 return ret;
227} 226}
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
259 if (gp) { 258 if (gp) {
260 ret = gp->handler(gp, pa, p_data, bytes); 259 ret = gp->handler(gp, pa, p_data, bytes);
261 if (ret) { 260 if (ret) {
262 gvt_err("vgpu%d: guest page write error %d, " 261 gvt_err("guest page write error %d, "
263 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 262 "gfn 0x%lx, pa 0x%llx, "
264 vgpu->id, ret, 263 "var 0x%x, len %d\n",
265 gp->gfn, pa, *(u32 *)p_data, bytes); 264 ret, gp->gfn, pa,
265 *(u32 *)p_data, bytes);
266 } 266 }
267 mutex_unlock(&gvt->lock); 267 mutex_unlock(&gvt->lock);
268 return ret; 268 return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
329 329
330 /* all register bits are RO. */ 330 /* all register bits are RO. */
331 if (ro_mask == ~(u64)0) { 331 if (ro_mask == ~(u64)0) {
332 gvt_err("vgpu%d: try to write RO reg %x\n", 332 gvt_vgpu_err("try to write RO reg %x\n",
333 vgpu->id, offset); 333 offset);
334 ret = 0; 334 ret = 0;
335 goto out; 335 goto out;
336 } 336 }
@@ -360,8 +360,8 @@ out:
360 mutex_unlock(&gvt->lock); 360 mutex_unlock(&gvt->lock);
361 return 0; 361 return 0;
362err: 362err:
363 gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", 363 gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
364 vgpu->id, offset, bytes); 364 bytes);
365 mutex_unlock(&gvt->lock); 365 mutex_unlock(&gvt->lock);
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f56f35..a3a027025cd0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
107 void *p_data, unsigned int bytes); 107 void *p_data, unsigned int bytes);
108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
109 void *p_data, unsigned int bytes); 109 void *p_data, unsigned int bytes);
110
111bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
112 unsigned int offset);
110#endif 113#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 5d1caf9daba9..311799136d7f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va 67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
68 + i * PAGE_SIZE); 68 + i * PAGE_SIZE);
69 if (mfn == INTEL_GVT_INVALID_ADDR) { 69 if (mfn == INTEL_GVT_INVALID_ADDR) {
70 gvt_err("fail to get MFN from VA\n"); 70 gvt_vgpu_err("fail to get MFN from VA\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, 73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
74 vgpu_opregion(vgpu)->gfn[i], 74 vgpu_opregion(vgpu)->gfn[i],
75 mfn, 1, map); 75 mfn, 1, map);
76 if (ret) { 76 if (ret) {
77 gvt_err("fail to map GFN to MFN, errno: %d\n", ret); 77 gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
78 ret);
78 return ret; 79 return ret;
79 } 80 }
80 } 81 }
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
287 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; 288 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
288 289
289 if (!(swsci & SWSCI_SCI_SELECT)) { 290 if (!(swsci & SWSCI_SCI_SELECT)) {
290 gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); 291 gvt_vgpu_err("requesting SMI service\n");
291 return 0; 292 return 0;
292 } 293 }
293 /* ignore non 0->1 trasitions */ 294 /* ignore non 0->1 trasitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
300 func = GVT_OPREGION_FUNC(*scic); 301 func = GVT_OPREGION_FUNC(*scic);
301 subfunc = GVT_OPREGION_SUBFUNC(*scic); 302 subfunc = GVT_OPREGION_SUBFUNC(*scic);
302 if (!querying_capabilities(*scic)) { 303 if (!querying_capabilities(*scic)) {
303 gvt_err("vgpu%d: requesting runtime service: func \"%s\"," 304 gvt_vgpu_err("requesting runtime service: func \"%s\","
304 " subfunc \"%s\"\n", 305 " subfunc \"%s\"\n",
305 vgpu->id,
306 opregion_func_name(func), 306 opregion_func_name(func),
307 opregion_subfunc_name(subfunc)); 307 opregion_subfunc_name(subfunc));
308 /* 308 /*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 73f052a4f424..0beb83563b08 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
167 I915_WRITE_FW(reg, 0x1); 167 I915_WRITE_FW(reg, 0x1);
168 168
169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) 169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
170 gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); 170 gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
171 else 171 else
172 vgpu_vreg(vgpu, regs[ring_id]) = 0; 172 vgpu_vreg(vgpu, regs[ring_id]) = 0;
173 173
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
207 l3_offset.reg = 0xb020; 207 l3_offset.reg = 0xb020;
208 for (i = 0; i < 32; i++) { 208 for (i = 0; i < 32; i++) {
209 gen9_render_mocs_L3[i] = I915_READ(l3_offset); 209 gen9_render_mocs_L3[i] = I915_READ(l3_offset);
210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); 210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
211 POSTING_READ(l3_offset); 211 POSTING_READ(l3_offset);
212 l3_offset.reg += 4; 212 l3_offset.reg += 4;
213 } 213 }
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584ac5f0..34b9acdf3479 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
101 struct list_head runq_head; 101 struct list_head runq_head;
102}; 102};
103 103
104#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) 104#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
105 105
106static void tbs_sched_func(struct work_struct *work) 106static void tbs_sched_func(struct work_struct *work)
107{ 107{
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
223 return; 223 return;
224 224
225 list_add_tail(&vgpu_data->list, &sched_data->runq_head); 225 list_add_tail(&vgpu_data->list, &sched_data->runq_head);
226 schedule_delayed_work(&sched_data->work, sched_data->period); 226 schedule_delayed_work(&sched_data->work, 0);
227} 227}
228 228
229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) 229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c949025..a44782412f2c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
84 (u32)((workload->ctx_desc.lrca + i) << 84 (u32)((workload->ctx_desc.lrca + i) <<
85 GTT_PAGE_SHIFT)); 85 GTT_PAGE_SHIFT));
86 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 86 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
87 gvt_err("Invalid guest context descriptor\n"); 87 gvt_vgpu_err("Invalid guest context descriptor\n");
88 return -EINVAL; 88 return -EINVAL;
89 } 89 }
90 90
@@ -127,19 +127,22 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
127 return 0; 127 return 0;
128} 128}
129 129
130static inline bool is_gvt_request(struct drm_i915_gem_request *req)
131{
132 return i915_gem_context_force_single_submission(req->ctx);
133}
134
130static int shadow_context_status_change(struct notifier_block *nb, 135static int shadow_context_status_change(struct notifier_block *nb,
131 unsigned long action, void *data) 136 unsigned long action, void *data)
132{ 137{
133 struct intel_vgpu *vgpu = container_of(nb, 138 struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
134 struct intel_vgpu, shadow_ctx_notifier_block); 139 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
135 struct drm_i915_gem_request *req = 140 shadow_ctx_notifier_block[req->engine->id]);
136 (struct drm_i915_gem_request *)data; 141 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
137 struct intel_gvt_workload_scheduler *scheduler =
138 &vgpu->gvt->scheduler;
139 struct intel_vgpu_workload *workload = 142 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 143 scheduler->current_workload[req->engine->id];
141 144
142 if (unlikely(!workload)) 145 if (!is_gvt_request(req) || unlikely(!workload))
143 return NOTIFY_OK; 146 return NOTIFY_OK;
144 147
145 switch (action) { 148 switch (action) {
@@ -175,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
175 int ring_id = workload->ring_id; 178 int ring_id = workload->ring_id;
176 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 179 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
177 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 180 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
181 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
178 struct drm_i915_gem_request *rq; 182 struct drm_i915_gem_request *rq;
183 struct intel_vgpu *vgpu = workload->vgpu;
179 int ret; 184 int ret;
180 185
181 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", 186 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
187 192
188 mutex_lock(&dev_priv->drm.struct_mutex); 193 mutex_lock(&dev_priv->drm.struct_mutex);
189 194
195 /* pin shadow context by gvt even the shadow context will be pinned
196 * when i915 alloc request. That is because gvt will update the guest
197 * context from shadow context when workload is completed, and at that
198 * moment, i915 may already unpined the shadow context to make the
199 * shadow_ctx pages invalid. So gvt need to pin itself. After update
200 * the guest context, gvt can unpin the shadow_ctx safely.
201 */
202 ret = engine->context_pin(engine, shadow_ctx);
203 if (ret) {
204 gvt_vgpu_err("fail to pin shadow context\n");
205 workload->status = ret;
206 mutex_unlock(&dev_priv->drm.struct_mutex);
207 return ret;
208 }
209
190 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 210 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
191 if (IS_ERR(rq)) { 211 if (IS_ERR(rq)) {
192 gvt_err("fail to allocate gem request\n"); 212 gvt_vgpu_err("fail to allocate gem request\n");
193 ret = PTR_ERR(rq); 213 ret = PTR_ERR(rq);
194 goto out; 214 goto out;
195 } 215 }
@@ -202,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
202 if (ret) 222 if (ret)
203 goto out; 223 goto out;
204 224
205 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 225 if ((workload->ring_id == RCS) &&
206 if (ret) 226 (workload->wa_ctx.indirect_ctx.size != 0)) {
207 goto out; 227 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
228 if (ret)
229 goto out;
230 }
208 231
209 ret = populate_shadow_context(workload); 232 ret = populate_shadow_context(workload);
210 if (ret) 233 if (ret)
@@ -227,6 +250,9 @@ out:
227 250
228 if (!IS_ERR_OR_NULL(rq)) 251 if (!IS_ERR_OR_NULL(rq))
229 i915_add_request_no_flush(rq); 252 i915_add_request_no_flush(rq);
253 else
254 engine->context_unpin(engine, shadow_ctx);
255
230 mutex_unlock(&dev_priv->drm.struct_mutex); 256 mutex_unlock(&dev_priv->drm.struct_mutex);
231 return ret; 257 return ret;
232} 258}
@@ -322,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
322 (u32)((workload->ctx_desc.lrca + i) << 348 (u32)((workload->ctx_desc.lrca + i) <<
323 GTT_PAGE_SHIFT)); 349 GTT_PAGE_SHIFT));
324 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 350 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
325 gvt_err("invalid guest context descriptor\n"); 351 gvt_vgpu_err("invalid guest context descriptor\n");
326 return; 352 return;
327 } 353 }
328 354
@@ -376,6 +402,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
376 * For the workload w/o request, directly complete the workload. 402 * For the workload w/o request, directly complete the workload.
377 */ 403 */
378 if (workload->req) { 404 if (workload->req) {
405 struct drm_i915_private *dev_priv =
406 workload->vgpu->gvt->dev_priv;
407 struct intel_engine_cs *engine =
408 dev_priv->engine[workload->ring_id];
379 wait_event(workload->shadow_ctx_status_wq, 409 wait_event(workload->shadow_ctx_status_wq,
380 !atomic_read(&workload->shadow_ctx_active)); 410 !atomic_read(&workload->shadow_ctx_active));
381 411
@@ -388,6 +418,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
388 INTEL_GVT_EVENT_MAX) 418 INTEL_GVT_EVENT_MAX)
389 intel_vgpu_trigger_virtual_event(vgpu, event); 419 intel_vgpu_trigger_virtual_event(vgpu, event);
390 } 420 }
421 mutex_lock(&dev_priv->drm.struct_mutex);
422 /* unpin shadow ctx as the shadow_ctx update is done */
423 engine->context_unpin(engine, workload->vgpu->shadow_ctx);
424 mutex_unlock(&dev_priv->drm.struct_mutex);
391 } 425 }
392 426
393 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 427 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +451,7 @@ static int workload_thread(void *priv)
417 int ring_id = p->ring_id; 451 int ring_id = p->ring_id;
418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 452 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
419 struct intel_vgpu_workload *workload = NULL; 453 struct intel_vgpu_workload *workload = NULL;
454 struct intel_vgpu *vgpu = NULL;
420 int ret; 455 int ret;
421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 456 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
422 DEFINE_WAIT_FUNC(wait, woken_wake_function); 457 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +494,14 @@ static int workload_thread(void *priv)
459 mutex_unlock(&gvt->lock); 494 mutex_unlock(&gvt->lock);
460 495
461 if (ret) { 496 if (ret) {
462 gvt_err("fail to dispatch workload, skip\n"); 497 vgpu = workload->vgpu;
498 gvt_vgpu_err("fail to dispatch workload, skip\n");
463 goto complete; 499 goto complete;
464 } 500 }
465 501
466 gvt_dbg_sched("ring id %d wait workload %p\n", 502 gvt_dbg_sched("ring id %d wait workload %p\n",
467 workload->ring_id, workload); 503 workload->ring_id, workload);
468retry: 504 i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
469 i915_wait_request(workload->req,
470 0, MAX_SCHEDULE_TIMEOUT);
471 /* I915 has replay mechanism and a request will be replayed
472 * if there is i915 reset. So the seqno will be updated anyway.
473 * If the seqno is not updated yet after waiting, which means
474 * the replay may still be in progress and we can wait again.
475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
480 }
481 505
482complete: 506complete:
483 gvt_dbg_sched("will complete workload %p, status: %d\n", 507 gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
513void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) 537void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
514{ 538{
515 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 539 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
516 int i; 540 struct intel_engine_cs *engine;
541 enum intel_engine_id i;
517 542
518 gvt_dbg_core("clean workload scheduler\n"); 543 gvt_dbg_core("clean workload scheduler\n");
519 544
520 for (i = 0; i < I915_NUM_ENGINES; i++) { 545 for_each_engine(engine, gvt->dev_priv, i) {
521 if (scheduler->thread[i]) { 546 atomic_notifier_chain_unregister(
522 kthread_stop(scheduler->thread[i]); 547 &engine->context_status_notifier,
523 scheduler->thread[i] = NULL; 548 &gvt->shadow_ctx_notifier_block[i]);
524 } 549 kthread_stop(scheduler->thread[i]);
525 } 550 }
526} 551}
527 552
@@ -529,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
529{ 554{
530 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 555 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
531 struct workload_thread_param *param = NULL; 556 struct workload_thread_param *param = NULL;
557 struct intel_engine_cs *engine;
558 enum intel_engine_id i;
532 int ret; 559 int ret;
533 int i;
534 560
535 gvt_dbg_core("init workload scheduler\n"); 561 gvt_dbg_core("init workload scheduler\n");
536 562
537 init_waitqueue_head(&scheduler->workload_complete_wq); 563 init_waitqueue_head(&scheduler->workload_complete_wq);
538 564
539 for (i = 0; i < I915_NUM_ENGINES; i++) { 565 for_each_engine(engine, gvt->dev_priv, i) {
540 /* check ring mask at init time */
541 if (!HAS_ENGINE(gvt->dev_priv, i))
542 continue;
543
544 init_waitqueue_head(&scheduler->waitq[i]); 566 init_waitqueue_head(&scheduler->waitq[i]);
545 567
546 param = kzalloc(sizeof(*param), GFP_KERNEL); 568 param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
559 ret = PTR_ERR(scheduler->thread[i]); 581 ret = PTR_ERR(scheduler->thread[i]);
560 goto err; 582 goto err;
561 } 583 }
584
585 gvt->shadow_ctx_notifier_block[i].notifier_call =
586 shadow_context_status_change;
587 atomic_notifier_chain_register(&engine->context_status_notifier,
588 &gvt->shadow_ctx_notifier_block[i]);
562 } 589 }
563 return 0; 590 return 0;
564err: 591err:
@@ -570,9 +597,6 @@ err:
570 597
571void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) 598void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
572{ 599{
573 atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
574 &vgpu->shadow_ctx_notifier_block);
575
576 i915_gem_context_put_unlocked(vgpu->shadow_ctx); 600 i915_gem_context_put_unlocked(vgpu->shadow_ctx);
577} 601}
578 602
@@ -587,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
587 611
588 vgpu->shadow_ctx->engine[RCS].initialised = true; 612 vgpu->shadow_ctx->engine[RCS].initialised = true;
589 613
590 vgpu->shadow_ctx_notifier_block.notifier_call =
591 shadow_context_status_change;
592
593 atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
594 &vgpu->shadow_ctx_notifier_block);
595 return 0; 614 return 0;
596} 615}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..649ef280cc9a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -72,7 +72,7 @@ static struct {
72 char *name; 72 char *name;
73} vgpu_types[] = { 73} vgpu_types[] = {
74/* Fixed vGPU type table */ 74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, 75 { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, 76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, 77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, 78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
179} 179}
180 180
181/** 181/**
182 * intel_gvt_destroy_vgpu - destroy a virtual GPU 182 * intel_gvt_active_vgpu - activate a virtual GPU
183 * @vgpu: virtual GPU 183 * @vgpu: virtual GPU
184 * 184 *
185 * This function is called when user wants to destroy a virtual GPU. 185 * This function is called when user wants to activate a virtual GPU.
186 * 186 *
187 */ 187 */
188void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) 188void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
189{
190 mutex_lock(&vgpu->gvt->lock);
191 vgpu->active = true;
192 mutex_unlock(&vgpu->gvt->lock);
193}
194
195/**
196 * intel_gvt_deactive_vgpu - deactivate a virtual GPU
197 * @vgpu: virtual GPU
198 *
199 * This function is called when user wants to deactivate a virtual GPU.
200 * All virtual GPU runtime information will be destroyed.
201 *
202 */
203void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
189{ 204{
190 struct intel_gvt *gvt = vgpu->gvt; 205 struct intel_gvt *gvt = vgpu->gvt;
191 206
192 mutex_lock(&gvt->lock); 207 mutex_lock(&gvt->lock);
193 208
194 vgpu->active = false; 209 vgpu->active = false;
195 idr_remove(&gvt->vgpu_idr, vgpu->id);
196 210
197 if (atomic_read(&vgpu->running_workload_num)) { 211 if (atomic_read(&vgpu->running_workload_num)) {
198 mutex_unlock(&gvt->lock); 212 mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
201 } 215 }
202 216
203 intel_vgpu_stop_schedule(vgpu); 217 intel_vgpu_stop_schedule(vgpu);
218
219 mutex_unlock(&gvt->lock);
220}
221
222/**
223 * intel_gvt_destroy_vgpu - destroy a virtual GPU
224 * @vgpu: virtual GPU
225 *
226 * This function is called when user wants to destroy a virtual GPU.
227 *
228 */
229void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
230{
231 struct intel_gvt *gvt = vgpu->gvt;
232
233 mutex_lock(&gvt->lock);
234
235 WARN(vgpu->active, "vGPU is still active!\n");
236
237 idr_remove(&gvt->vgpu_idr, vgpu->id);
204 intel_vgpu_clean_sched_policy(vgpu); 238 intel_vgpu_clean_sched_policy(vgpu);
205 intel_vgpu_clean_gvt_context(vgpu); 239 intel_vgpu_clean_gvt_context(vgpu);
206 intel_vgpu_clean_execlist(vgpu); 240 intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
277 if (ret) 311 if (ret)
278 goto out_clean_shadow_ctx; 312 goto out_clean_shadow_ctx;
279 313
280 vgpu->active = true;
281 mutex_unlock(&gvt->lock); 314 mutex_unlock(&gvt->lock);
282 315
283 return vgpu; 316 return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e703556eba99..5c089b3c2a7e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
248 case I915_PARAM_IRQ_ACTIVE: 248 case I915_PARAM_IRQ_ACTIVE:
249 case I915_PARAM_ALLOW_BATCHBUFFER: 249 case I915_PARAM_ALLOW_BATCHBUFFER:
250 case I915_PARAM_LAST_DISPATCH: 250 case I915_PARAM_LAST_DISPATCH:
251 case I915_PARAM_HAS_EXEC_CONSTANTS:
251 /* Reject all old ums/dri params. */ 252 /* Reject all old ums/dri params. */
252 return -ENODEV; 253 return -ENODEV;
253 case I915_PARAM_CHIPSET_ID: 254 case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
274 case I915_PARAM_HAS_BSD2: 275 case I915_PARAM_HAS_BSD2:
275 value = !!dev_priv->engine[VCS2]; 276 value = !!dev_priv->engine[VCS2];
276 break; 277 break;
277 case I915_PARAM_HAS_EXEC_CONSTANTS:
278 value = INTEL_GEN(dev_priv) >= 4;
279 break;
280 case I915_PARAM_HAS_LLC: 278 case I915_PARAM_HAS_LLC:
281 value = HAS_LLC(dev_priv); 279 value = HAS_LLC(dev_priv);
282 break; 280 break;
@@ -1436,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
1436 goto out; 1434 goto out;
1437 } 1435 }
1438 1436
1439 intel_guc_suspend(dev_priv);
1440
1441 intel_display_suspend(dev); 1437 intel_display_suspend(dev);
1442 1438
1443 intel_dp_mst_suspend(dev); 1439 intel_dp_mst_suspend(dev);
@@ -1788,7 +1784,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1788 goto error; 1784 goto error;
1789 } 1785 }
1790 1786
1791 i915_gem_reset_finish(dev_priv); 1787 i915_gem_reset(dev_priv);
1792 intel_overlay_reset(dev_priv); 1788 intel_overlay_reset(dev_priv);
1793 1789
1794 /* Ok, now get things going again... */ 1790 /* Ok, now get things going again... */
@@ -1814,6 +1810,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1814 i915_queue_hangcheck(dev_priv); 1810 i915_queue_hangcheck(dev_priv);
1815 1811
1816wakeup: 1812wakeup:
1813 i915_gem_reset_finish(dev_priv);
1817 enable_irq(dev_priv->drm.irq); 1814 enable_irq(dev_priv->drm.irq);
1818 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); 1815 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
1819 return; 1816 return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7febe6eecf72..46fcd8b7080a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -806,6 +806,7 @@ struct intel_csr {
806 func(has_resource_streamer); \ 806 func(has_resource_streamer); \
807 func(has_runtime_pm); \ 807 func(has_runtime_pm); \
808 func(has_snoop); \ 808 func(has_snoop); \
809 func(unfenced_needs_alignment); \
809 func(cursor_needs_physical); \ 810 func(cursor_needs_physical); \
810 func(hws_needs_physical); \ 811 func(hws_needs_physical); \
811 func(overlay_needs_physical); \ 812 func(overlay_needs_physical); \
@@ -1325,7 +1326,7 @@ struct intel_gen6_power_mgmt {
1325 unsigned boosts; 1326 unsigned boosts;
1326 1327
1327 /* manual wa residency calculations */ 1328 /* manual wa residency calculations */
1328 struct intel_rps_ei up_ei, down_ei; 1329 struct intel_rps_ei ei;
1329 1330
1330 /* 1331 /*
1331 * Protects RPS/RC6 register access and PCU communication. 1332 * Protects RPS/RC6 register access and PCU communication.
@@ -2064,8 +2065,6 @@ struct drm_i915_private {
2064 2065
2065 const struct intel_device_info info; 2066 const struct intel_device_info info;
2066 2067
2067 int relative_constants_mode;
2068
2069 void __iomem *regs; 2068 void __iomem *regs;
2070 2069
2071 struct intel_uncore uncore; 2070 struct intel_uncore uncore;
@@ -3342,6 +3341,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
3342} 3341}
3343 3342
3344int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3343int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3344void i915_gem_reset(struct drm_i915_private *dev_priv);
3345void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 3345void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3346void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3346void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3347void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3347void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 10777da73039..fe531f904062 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2719,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2719 for_each_engine(engine, dev_priv, id) { 2719 for_each_engine(engine, dev_priv, id) {
2720 struct drm_i915_gem_request *request; 2720 struct drm_i915_gem_request *request;
2721 2721
2722 /* Prevent request submission to the hardware until we have
2723 * completed the reset in i915_gem_reset_finish(). If a request
2724 * is completed by one engine, it may then queue a request
2725 * to a second via its engine->irq_tasklet *just* as we are
2726 * calling engine->init_hw() and also writing the ELSP.
2727 * Turning off the engine->irq_tasklet until the reset is over
2728 * prevents the race.
2729 */
2722 tasklet_kill(&engine->irq_tasklet); 2730 tasklet_kill(&engine->irq_tasklet);
2731 tasklet_disable(&engine->irq_tasklet);
2723 2732
2724 if (engine_stalled(engine)) { 2733 if (engine_stalled(engine)) {
2725 request = i915_gem_find_active_request(engine); 2734 request = i915_gem_find_active_request(engine);
@@ -2834,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2834 engine->reset_hw(engine, request); 2843 engine->reset_hw(engine, request);
2835} 2844}
2836 2845
2837void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 2846void i915_gem_reset(struct drm_i915_private *dev_priv)
2838{ 2847{
2839 struct intel_engine_cs *engine; 2848 struct intel_engine_cs *engine;
2840 enum intel_engine_id id; 2849 enum intel_engine_id id;
@@ -2856,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2856 } 2865 }
2857} 2866}
2858 2867
2868void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2869{
2870 struct intel_engine_cs *engine;
2871 enum intel_engine_id id;
2872
2873 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2874
2875 for_each_engine(engine, dev_priv, id)
2876 tasklet_enable(&engine->irq_tasklet);
2877}
2878
2859static void nop_submit_request(struct drm_i915_gem_request *request) 2879static void nop_submit_request(struct drm_i915_gem_request *request)
2860{ 2880{
2861 dma_fence_set_error(&request->fence, -EIO); 2881 dma_fence_set_error(&request->fence, -EIO);
@@ -4328,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
4328 i915_gem_context_lost(dev_priv); 4348 i915_gem_context_lost(dev_priv);
4329 mutex_unlock(&dev->struct_mutex); 4349 mutex_unlock(&dev->struct_mutex);
4330 4350
4351 intel_guc_suspend(dev_priv);
4352
4331 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4353 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4332 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4354 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4333 4355
@@ -4674,8 +4696,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
4674 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 4696 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4675 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4697 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4676 4698
4677 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4678
4679 init_waitqueue_head(&dev_priv->pending_flip_queue); 4699 init_waitqueue_head(&dev_priv->pending_flip_queue);
4680 4700
4681 dev_priv->mm.interruptible = true; 4701 dev_priv->mm.interruptible = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c618208..e2d83b6d376b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
311 ctx->ring_size = 4 * PAGE_SIZE; 311 ctx->ring_size = 4 * PAGE_SIZE;
312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << 312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
313 GEN8_CTX_ADDRESSING_MODE_SHIFT; 313 GEN8_CTX_ADDRESSING_MODE_SHIFT;
314 ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
315 314
316 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not 315 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
317 * present or not in use we still need a small bias as ring wraparound 316 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b90f3d..e9c008fe14b1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
160 /** desc_template: invariant fields for the HW context descriptor */ 160 /** desc_template: invariant fields for the HW context descriptor */
161 u32 desc_template; 161 u32 desc_template;
162 162
163 /** status_notifier: list of callbacks for context-switch changes */
164 struct atomic_notifier_head status_notifier;
165
166 /** guilty_count: How many times this context has caused a GPU hang. */ 163 /** guilty_count: How many times this context has caused a GPU hang. */
167 unsigned int guilty_count; 164 unsigned int guilty_count;
168 /** 165 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d02cfaefe1c8..15a15d00a6bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
888 struct list_head ordered_vmas; 888 struct list_head ordered_vmas;
889 struct list_head pinned_vmas; 889 struct list_head pinned_vmas;
890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; 890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
891 bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
891 int retry; 892 int retry;
892 893
893 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; 894 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
908 if (!has_fenced_gpu_access) 909 if (!has_fenced_gpu_access)
909 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; 910 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
910 need_fence = 911 need_fence =
911 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 912 (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
913 needs_unfenced_map) &&
912 i915_gem_object_is_tiled(obj); 914 i915_gem_object_is_tiled(obj);
913 need_mappable = need_fence || need_reloc_mappable(vma); 915 need_mappable = need_fence || need_reloc_mappable(vma);
914 916
@@ -1408,10 +1410,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
1408 struct drm_i915_gem_execbuffer2 *args, 1410 struct drm_i915_gem_execbuffer2 *args,
1409 struct list_head *vmas) 1411 struct list_head *vmas)
1410{ 1412{
1411 struct drm_i915_private *dev_priv = params->request->i915;
1412 u64 exec_start, exec_len; 1413 u64 exec_start, exec_len;
1413 int instp_mode;
1414 u32 instp_mask;
1415 int ret; 1414 int ret;
1416 1415
1417 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 1416 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1421,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
1422 if (ret) 1421 if (ret)
1423 return ret; 1422 return ret;
1424 1423
1425 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1424 if (args->flags & I915_EXEC_CONSTANTS_MASK) {
1426 instp_mask = I915_EXEC_CONSTANTS_MASK; 1425 DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
1427 switch (instp_mode) {
1428 case I915_EXEC_CONSTANTS_REL_GENERAL:
1429 case I915_EXEC_CONSTANTS_ABSOLUTE:
1430 case I915_EXEC_CONSTANTS_REL_SURFACE:
1431 if (instp_mode != 0 && params->engine->id != RCS) {
1432 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1433 return -EINVAL;
1434 }
1435
1436 if (instp_mode != dev_priv->relative_constants_mode) {
1437 if (INTEL_INFO(dev_priv)->gen < 4) {
1438 DRM_DEBUG("no rel constants on pre-gen4\n");
1439 return -EINVAL;
1440 }
1441
1442 if (INTEL_INFO(dev_priv)->gen > 5 &&
1443 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1444 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1445 return -EINVAL;
1446 }
1447
1448 /* The HW changed the meaning on this bit on gen6 */
1449 if (INTEL_INFO(dev_priv)->gen >= 6)
1450 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1451 }
1452 break;
1453 default:
1454 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1455 return -EINVAL; 1426 return -EINVAL;
1456 } 1427 }
1457 1428
1458 if (params->engine->id == RCS &&
1459 instp_mode != dev_priv->relative_constants_mode) {
1460 struct intel_ring *ring = params->request->ring;
1461
1462 ret = intel_ring_begin(params->request, 4);
1463 if (ret)
1464 return ret;
1465
1466 intel_ring_emit(ring, MI_NOOP);
1467 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1468 intel_ring_emit_reg(ring, INSTPM);
1469 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1470 intel_ring_advance(ring);
1471
1472 dev_priv->relative_constants_mode = instp_mode;
1473 }
1474
1475 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1429 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1476 ret = i915_reset_gen7_sol_offsets(params->request); 1430 ret = i915_reset_gen7_sol_offsets(params->request);
1477 if (ret) 1431 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2801a4d56324..96e45a4d5441 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2704 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2704 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2705 2705
2706 if (unlikely(ggtt->do_idle_maps)) { 2706 if (unlikely(ggtt->do_idle_maps)) {
2707 if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) { 2707 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2709 /* Wait a bit, in hopes it avoids the hang */ 2709 /* Wait a bit, in hopes it avoids the hang */
2710 udelay(10); 2710 udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e7c3c0318ff6..da70bfe97ec5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
37 37
38static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 38static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39{ 39{
40 /* The timeline struct (as part of the ppgtt underneath a context)
41 * may be freed when the request is no longer in use by the GPU.
42 * We could extend the life of a context to beyond that of all
43 * fences, possibly keeping the hw resource around indefinitely,
44 * or we just give them a false name. Since
45 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
46 * lie seems justifiable.
47 */
48 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
49 return "signaled";
50
40 return to_request(fence)->timeline->common->name; 51 return to_request(fence)->timeline->common->name;
41} 52}
42 53
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 401006b4c6a3..70b3832a79dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
53 BUG(); 53 BUG();
54} 54}
55 55
56static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
57{
58 if (!unlock)
59 return;
60
61 mutex_unlock(&dev->struct_mutex);
62
63 /* expedite the RCU grace period to free some request slabs */
64 synchronize_rcu_expedited();
65}
66
56static bool any_vma_pinned(struct drm_i915_gem_object *obj) 67static bool any_vma_pinned(struct drm_i915_gem_object *obj)
57{ 68{
58 struct i915_vma *vma; 69 struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
232 intel_runtime_pm_put(dev_priv); 243 intel_runtime_pm_put(dev_priv);
233 244
234 i915_gem_retire_requests(dev_priv); 245 i915_gem_retire_requests(dev_priv);
235 if (unlock)
236 mutex_unlock(&dev_priv->drm.struct_mutex);
237 246
238 /* expedite the RCU grace period to free some request slabs */ 247 i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
239 synchronize_rcu_expedited();
240 248
241 return count; 249 return count;
242} 250}
@@ -263,7 +271,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
263 I915_SHRINK_BOUND | 271 I915_SHRINK_BOUND |
264 I915_SHRINK_UNBOUND | 272 I915_SHRINK_UNBOUND |
265 I915_SHRINK_ACTIVE); 273 I915_SHRINK_ACTIVE);
266 rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ 274 synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
267 275
268 return freed; 276 return freed;
269} 277}
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
293 count += obj->base.size >> PAGE_SHIFT; 301 count += obj->base.size >> PAGE_SHIFT;
294 } 302 }
295 303
296 if (unlock) 304 i915_gem_shrinker_unlock(dev, unlock);
297 mutex_unlock(&dev->struct_mutex);
298 305
299 return count; 306 return count;
300} 307}
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
321 sc->nr_to_scan - freed, 328 sc->nr_to_scan - freed,
322 I915_SHRINK_BOUND | 329 I915_SHRINK_BOUND |
323 I915_SHRINK_UNBOUND); 330 I915_SHRINK_UNBOUND);
324 if (unlock) 331
325 mutex_unlock(&dev->struct_mutex); 332 i915_gem_shrinker_unlock(dev, unlock);
326 333
327 return freed; 334 return freed;
328} 335}
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
364 struct shrinker_lock_uninterruptible *slu) 371 struct shrinker_lock_uninterruptible *slu)
365{ 372{
366 dev_priv->mm.interruptible = slu->was_interruptible; 373 dev_priv->mm.interruptible = slu->was_interruptible;
367 if (slu->unlock) 374 i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
368 mutex_unlock(&dev_priv->drm.struct_mutex);
369} 375}
370 376
371static int 377static int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6ffef2f707a..b6c886ac901b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1047} 1047}
1048 1048
1049static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1050 const struct intel_rps_ei *old,
1051 const struct intel_rps_ei *now,
1052 int threshold)
1053{
1054 u64 time, c0;
1055 unsigned int mul = 100;
1056
1057 if (old->cz_clock == 0)
1058 return false;
1059
1060 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1061 mul <<= 8;
1062
1063 time = now->cz_clock - old->cz_clock;
1064 time *= threshold * dev_priv->czclk_freq;
1065
1066 /* Workload can be split between render + media, e.g. SwapBuffers
1067 * being blitted in X after being rendered in mesa. To account for
1068 * this we need to combine both engines into our activity counter.
1069 */
1070 c0 = now->render_c0 - old->render_c0;
1071 c0 += now->media_c0 - old->media_c0;
1072 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1073
1074 return c0 >= time;
1075}
1076
1077void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1049void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1078{ 1050{
1079 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1051 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1080 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1081} 1052}
1082 1053
1083static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1054static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1084{ 1055{
1056 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1085 struct intel_rps_ei now; 1057 struct intel_rps_ei now;
1086 u32 events = 0; 1058 u32 events = 0;
1087 1059
1088 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1060 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1089 return 0; 1061 return 0;
1090 1062
1091 vlv_c0_read(dev_priv, &now); 1063 vlv_c0_read(dev_priv, &now);
1092 if (now.cz_clock == 0) 1064 if (now.cz_clock == 0)
1093 return 0; 1065 return 0;
1094 1066
1095 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1067 if (prev->cz_clock) {
1096 if (!vlv_c0_above(dev_priv, 1068 u64 time, c0;
1097 &dev_priv->rps.down_ei, &now, 1069 unsigned int mul;
1098 dev_priv->rps.down_threshold)) 1070
1099 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1071 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1100 dev_priv->rps.down_ei = now; 1072 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1101 } 1073 mul <<= 8;
1102 1074
1103 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1075 time = now.cz_clock - prev->cz_clock;
1104 if (vlv_c0_above(dev_priv, 1076 time *= dev_priv->czclk_freq;
1105 &dev_priv->rps.up_ei, &now, 1077
1106 dev_priv->rps.up_threshold)) 1078 /* Workload can be split between render + media,
1107 events |= GEN6_PM_RP_UP_THRESHOLD; 1079 * e.g. SwapBuffers being blitted in X after being rendered in
1108 dev_priv->rps.up_ei = now; 1080 * mesa. To account for this we need to combine both engines
1081 * into our activity counter.
1082 */
1083 c0 = now.render_c0 - prev->render_c0;
1084 c0 += now.media_c0 - prev->media_c0;
1085 c0 *= mul;
1086
1087 if (c0 > time * dev_priv->rps.up_threshold)
1088 events = GEN6_PM_RP_UP_THRESHOLD;
1089 else if (c0 < time * dev_priv->rps.down_threshold)
1090 events = GEN6_PM_RP_DOWN_THRESHOLD;
1109 } 1091 }
1110 1092
1093 dev_priv->rps.ei = now;
1111 return events; 1094 return events;
1112} 1095}
1113 1096
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4228 /* Let's track the enabled rps events */ 4211 /* Let's track the enabled rps events */
4229 if (IS_VALLEYVIEW(dev_priv)) 4212 if (IS_VALLEYVIEW(dev_priv))
4230 /* WaGsvRC0ResidencyMethod:vlv */ 4213 /* WaGsvRC0ResidencyMethod:vlv */
4231 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4214 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4232 else 4215 else
4233 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4216 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4234 4217
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4266 if (!IS_GEN2(dev_priv)) 4249 if (!IS_GEN2(dev_priv))
4267 dev->vblank_disable_immediate = true; 4250 dev->vblank_disable_immediate = true;
4268 4251
4252 /* Most platforms treat the display irq block as an always-on
4253 * power domain. vlv/chv can disable it at runtime and need
4254 * special care to avoid writing any of the display block registers
4255 * outside of the power domain. We defer setting up the display irqs
4256 * in this case to the runtime pm.
4257 */
4258 dev_priv->display_irqs_enabled = true;
4259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4260 dev_priv->display_irqs_enabled = false;
4261
4269 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4262 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4270 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4263 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4271 4264
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ecb487b5356f..9bbbd4e83e3c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -60,6 +60,7 @@
60 .has_overlay = 1, .overlay_needs_physical = 1, \ 60 .has_overlay = 1, .overlay_needs_physical = 1, \
61 .has_gmch_display = 1, \ 61 .has_gmch_display = 1, \
62 .hws_needs_physical = 1, \ 62 .hws_needs_physical = 1, \
63 .unfenced_needs_alignment = 1, \
63 .ring_mask = RENDER_RING, \ 64 .ring_mask = RENDER_RING, \
64 GEN_DEFAULT_PIPEOFFSETS, \ 65 GEN_DEFAULT_PIPEOFFSETS, \
65 CURSOR_OFFSETS 66 CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
101 .platform = INTEL_I915G, .cursor_needs_physical = 1, 102 .platform = INTEL_I915G, .cursor_needs_physical = 1,
102 .has_overlay = 1, .overlay_needs_physical = 1, 103 .has_overlay = 1, .overlay_needs_physical = 1,
103 .hws_needs_physical = 1, 104 .hws_needs_physical = 1,
105 .unfenced_needs_alignment = 1,
104}; 106};
105 107
106static const struct intel_device_info intel_i915gm_info = { 108static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
112 .supports_tv = 1, 114 .supports_tv = 1,
113 .has_fbc = 1, 115 .has_fbc = 1,
114 .hws_needs_physical = 1, 116 .hws_needs_physical = 1,
117 .unfenced_needs_alignment = 1,
115}; 118};
116 119
117static const struct intel_device_info intel_i945g_info = { 120static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
120 .has_hotplug = 1, .cursor_needs_physical = 1, 123 .has_hotplug = 1, .cursor_needs_physical = 1,
121 .has_overlay = 1, .overlay_needs_physical = 1, 124 .has_overlay = 1, .overlay_needs_physical = 1,
122 .hws_needs_physical = 1, 125 .hws_needs_physical = 1,
126 .unfenced_needs_alignment = 1,
123}; 127};
124 128
125static const struct intel_device_info intel_i945gm_info = { 129static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
130 .supports_tv = 1, 134 .supports_tv = 1,
131 .has_fbc = 1, 135 .has_fbc = 1,
132 .hws_needs_physical = 1, 136 .hws_needs_physical = 1,
137 .unfenced_needs_alignment = 1,
133}; 138};
134 139
135static const struct intel_device_info intel_g33_info = { 140static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a1b7eec58be2..70964ca9251e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1705 */ 1705 */
1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
1707 ret = -ENODEV; 1707 ret = -ENODEV;
1708 goto err_alloc; 1708 goto err_flags;
1709 } 1709 }
1710 1710
1711 list_add(&stream->link, &dev_priv->perf.streams); 1711 list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1728 1728
1729err_open: 1729err_open:
1730 list_del(&stream->link); 1730 list_del(&stream->link);
1731err_flags:
1731 if (stream->ops->destroy) 1732 if (stream->ops->destroy)
1732 stream->ops->destroy(stream); 1733 stream->ops->destroy(stream);
1733err_alloc: 1734err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1793 if (ret) 1794 if (ret)
1794 return ret; 1795 return ret;
1795 1796
1797 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
1798 DRM_DEBUG("Unknown i915 perf property ID\n");
1799 return -EINVAL;
1800 }
1801
1796 switch ((enum drm_i915_perf_property_id)id) { 1802 switch ((enum drm_i915_perf_property_id)id) {
1797 case DRM_I915_PERF_PROP_CTX_HANDLE: 1803 case DRM_I915_PERF_PROP_CTX_HANDLE:
1798 props->single_context = 1; 1804 props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1862 props->oa_periodic = true; 1868 props->oa_periodic = true;
1863 props->oa_period_exponent = value; 1869 props->oa_period_exponent = value;
1864 break; 1870 break;
1865 default: 1871 case DRM_I915_PERF_PROP_MAX:
1866 MISSING_CASE(id); 1872 MISSING_CASE(id);
1867 DRM_DEBUG("Unknown i915 perf property ID\n");
1868 return -EINVAL; 1873 return -EINVAL;
1869 } 1874 }
1870 1875
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 0085bc745f6a..de219b71fb76 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" 37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
38MODULE_FIRMWARE(I915_CSR_GLK);
39#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
40 39
41#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" 40#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3282b0f4b134..ed1f4f272b4f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
16696 } 16696 }
16697 } 16697 }
16698 16698
16699 intel_update_czclk(dev_priv);
16700 intel_update_cdclk(dev_priv);
16701 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
16702
16703 intel_shared_dpll_init(dev); 16699 intel_shared_dpll_init(dev);
16704 16700
16701 intel_update_czclk(dev_priv);
16702 intel_modeset_init_hw(dev);
16703
16705 if (dev_priv->max_cdclk_freq == 0) 16704 if (dev_priv->max_cdclk_freq == 0)
16706 intel_update_max_cdclk(dev_priv); 16705 intel_update_max_cdclk(dev_priv);
16707 16706
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
17258 17257
17259 intel_init_gt_powersave(dev_priv); 17258 intel_init_gt_powersave(dev_priv);
17260 17259
17261 intel_modeset_init_hw(dev);
17262
17263 intel_setup_overlay(dev_priv); 17260 intel_setup_overlay(dev_priv);
17264} 17261}
17265 17262
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf109e34..ab1be5c80ea5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
105 /* Nothing to do here, execute in order of dependencies */ 105 /* Nothing to do here, execute in order of dependencies */
106 engine->schedule = NULL; 106 engine->schedule = NULL;
107 107
108 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
109
108 dev_priv->engine[id] = engine; 110 dev_priv->engine[id] = engine;
109 return 0; 111 return 0;
110} 112}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index d23c0fcff751..8c04eca84351 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
77 goto bail; 77 goto bail;
78 } 78 }
79 79
80 if (!i915.enable_execlists) {
81 DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
82 goto bail;
83 }
84
80 /* 85 /*
81 * We're not in host or fail to find a MPT module, disable GVT-g 86 * We're not in host or fail to find a MPT module, disable GVT-g
82 */ 87 */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ebae2bd83918..24b2fa5b6282 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1298 1298
1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) 1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
1300{ 1300{
1301 struct drm_device *dev = crtc_state->base.crtc->dev; 1301 struct drm_i915_private *dev_priv =
1302 to_i915(crtc_state->base.crtc->dev);
1303 struct drm_atomic_state *state = crtc_state->base.state;
1304 struct drm_connector_state *connector_state;
1305 struct drm_connector *connector;
1306 int i;
1302 1307
1303 if (HAS_GMCH_DISPLAY(to_i915(dev))) 1308 if (HAS_GMCH_DISPLAY(dev_priv))
1304 return false; 1309 return false;
1305 1310
1306 /* 1311 /*
1307 * HDMI 12bpc affects the clocks, so it's only possible 1312 * HDMI 12bpc affects the clocks, so it's only possible
1308 * when not cloning with other encoder types. 1313 * when not cloning with other encoder types.
1309 */ 1314 */
1310 return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; 1315 if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
1316 return false;
1317
1318 for_each_connector_in_state(state, connector, connector_state, i) {
1319 const struct drm_display_info *info = &connector->display_info;
1320
1321 if (connector_state->crtc != crtc_state->base.crtc)
1322 continue;
1323
1324 if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
1325 return false;
1326 }
1327
1328 return true;
1311} 1329}
1312 1330
1313bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1331bool intel_hdmi_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b62e3f8ad415..54208bef7a83 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
219 } 219 }
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev_priv); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
425 } 425 }
426 } 426 }
427 427
428 if (storm_detected) 428 if (storm_detected && dev_priv->display_irqs_enabled)
429 dev_priv->display.hpd_irq_setup(dev_priv); 429 dev_priv->display.hpd_irq_setup(dev_priv);
430 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
431 431
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
471 * Interrupt setup is already guaranteed to be single-threaded, this is 471 * Interrupt setup is already guaranteed to be single-threaded, this is
472 * just to make the assert_spin_locked checks happy. 472 * just to make the assert_spin_locked checks happy.
473 */ 473 */
474 spin_lock_irq(&dev_priv->irq_lock); 474 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
475 if (dev_priv->display.hpd_irq_setup) 475 spin_lock_irq(&dev_priv->irq_lock);
476 dev_priv->display.hpd_irq_setup(dev_priv); 476 if (dev_priv->display_irqs_enabled)
477 spin_unlock_irq(&dev_priv->irq_lock); 477 dev_priv->display.hpd_irq_setup(dev_priv);
478 spin_unlock_irq(&dev_priv->irq_lock);
479 }
478} 480}
479 481
480static void i915_hpd_poll_init_work(struct work_struct *work) 482static void i915_hpd_poll_init_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023d21e6..47517a02f0a4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
346 return; 346 return;
347 347
348 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); 348 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
349 status, rq);
349} 350}
350 351
351static void 352static void
@@ -669,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
669static struct intel_engine_cs * 670static struct intel_engine_cs *
670pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) 671pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
671{ 672{
672 struct intel_engine_cs *engine; 673 struct intel_engine_cs *engine =
674 container_of(pt, struct drm_i915_gem_request, priotree)->engine;
675
676 GEM_BUG_ON(!locked);
673 677
674 engine = container_of(pt,
675 struct drm_i915_gem_request,
676 priotree)->engine;
677 if (engine != locked) { 678 if (engine != locked) {
678 if (locked) 679 spin_unlock(&locked->timeline->lock);
679 spin_unlock_irq(&locked->timeline->lock); 680 spin_lock(&engine->timeline->lock);
680 spin_lock_irq(&engine->timeline->lock);
681 } 681 }
682 682
683 return engine; 683 return engine;
@@ -685,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
685 685
686static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 686static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
687{ 687{
688 struct intel_engine_cs *engine = NULL; 688 struct intel_engine_cs *engine;
689 struct i915_dependency *dep, *p; 689 struct i915_dependency *dep, *p;
690 struct i915_dependency stack; 690 struct i915_dependency stack;
691 LIST_HEAD(dfs); 691 LIST_HEAD(dfs);
@@ -719,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
719 list_for_each_entry_safe(dep, p, &dfs, dfs_link) { 719 list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
720 struct i915_priotree *pt = dep->signaler; 720 struct i915_priotree *pt = dep->signaler;
721 721
722 list_for_each_entry(p, &pt->signalers_list, signal_link) 722 /* Within an engine, there can be no cycle, but we may
723 * refer to the same dependency chain multiple times
724 * (redundant dependencies are not eliminated) and across
725 * engines.
726 */
727 list_for_each_entry(p, &pt->signalers_list, signal_link) {
728 GEM_BUG_ON(p->signaler->priority < pt->priority);
723 if (prio > READ_ONCE(p->signaler->priority)) 729 if (prio > READ_ONCE(p->signaler->priority))
724 list_move_tail(&p->dfs_link, &dfs); 730 list_move_tail(&p->dfs_link, &dfs);
731 }
725 732
726 list_safe_reset_next(dep, p, dfs_link); 733 list_safe_reset_next(dep, p, dfs_link);
727 if (!RB_EMPTY_NODE(&pt->node))
728 continue;
729
730 engine = pt_lock_engine(pt, engine);
731
732 /* If it is not already in the rbtree, we can update the
733 * priority inplace and skip over it (and its dependencies)
734 * if it is referenced *again* as we descend the dfs.
735 */
736 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
737 pt->priority = prio;
738 list_del_init(&dep->dfs_link);
739 }
740 } 734 }
741 735
736 engine = request->engine;
737 spin_lock_irq(&engine->timeline->lock);
738
742 /* Fifo and depth-first replacement ensure our deps execute before us */ 739 /* Fifo and depth-first replacement ensure our deps execute before us */
743 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { 740 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
744 struct i915_priotree *pt = dep->signaler; 741 struct i915_priotree *pt = dep->signaler;
@@ -750,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
750 if (prio <= pt->priority) 747 if (prio <= pt->priority)
751 continue; 748 continue;
752 749
753 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
754
755 pt->priority = prio; 750 pt->priority = prio;
756 rb_erase(&pt->node, &engine->execlist_queue); 751 if (!RB_EMPTY_NODE(&pt->node)) {
757 if (insert_request(pt, &engine->execlist_queue)) 752 rb_erase(&pt->node, &engine->execlist_queue);
758 engine->execlist_first = &pt->node; 753 if (insert_request(pt, &engine->execlist_queue))
754 engine->execlist_first = &pt->node;
755 }
759 } 756 }
760 757
761 if (engine) 758 spin_unlock_irq(&engine->timeline->lock);
762 spin_unlock_irq(&engine->timeline->lock);
763 759
764 /* XXX Do we need to preempt to make room for us and our deps? */ 760 /* XXX Do we need to preempt to make room for us and our deps? */
765} 761}
@@ -1439,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1439 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1435 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1440 1436
1441 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1437 /* Reset WaIdleLiteRestore:bdw,skl as well */
1442 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1438 request->tail =
1439 intel_ring_wrap(request->ring,
1440 request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
1443} 1441}
1444 1442
1445static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1443static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 940bab22d464..6a29784d2b41 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4928,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4928{ 4928{
4929 u32 mask = 0; 4929 u32 mask = 0;
4930 4930
4931 /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
4931 if (val > dev_priv->rps.min_freq_softlimit) 4932 if (val > dev_priv->rps.min_freq_softlimit)
4932 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4933 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4933 if (val < dev_priv->rps.max_freq_softlimit) 4934 if (val < dev_priv->rps.max_freq_softlimit)
4934 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4935 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4935 4936
@@ -5039,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
5039{ 5040{
5040 mutex_lock(&dev_priv->rps.hw_lock); 5041 mutex_lock(&dev_priv->rps.hw_lock);
5041 if (dev_priv->rps.enabled) { 5042 if (dev_priv->rps.enabled) {
5042 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 5043 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
5043 gen6_rps_reset_ei(dev_priv); 5044 gen6_rps_reset_ei(dev_priv);
5044 I915_WRITE(GEN6_PMINTRMSK, 5045 I915_WRITE(GEN6_PMINTRMSK,
5045 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 5046 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91bc4abf5d3e..6c5f9958197d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
2024 ret = context_pin(ctx, flags); 2024 ret = context_pin(ctx, flags);
2025 if (ret) 2025 if (ret)
2026 goto error; 2026 goto error;
2027
2028 ce->state->obj->mm.dirty = true;
2027 } 2029 }
2028 2030
2029 /* The kernel context is only used as a placeholder for flushing the 2031 /* The kernel context is only used as a placeholder for flushing the
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d72322..8cb2078c5bfc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -403,6 +403,9 @@ struct intel_engine_cs {
403 */ 403 */
404 struct i915_gem_context *legacy_active_context; 404 struct i915_gem_context *legacy_active_context;
405 405
406 /* status_notifier: list of callbacks for context-switch changes */
407 struct atomic_notifier_head context_status_notifier;
408
406 struct intel_engine_hangcheck hangcheck; 409 struct intel_engine_hangcheck hangcheck;
407 410
408 bool needs_cmd_parser; 411 bool needs_cmd_parser;
@@ -518,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
518 */ 521 */
519} 522}
520 523
524static inline u32
525intel_ring_wrap(const struct intel_ring *ring, u32 pos)
526{
527 return pos & (ring->size - 1);
528}
529
521static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr) 530static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
522{ 531{
523 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ 532 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
524 u32 offset = addr - ring->vaddr; 533 u32 offset = addr - ring->vaddr;
525 return offset & (ring->size - 1); 534 return intel_ring_wrap(ring, offset);
526} 535}
527 536
528int __intel_ring_space(int head, int tail, int size); 537int __intel_ring_space(int head, int tail, int size);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..36602ac7e248 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
1/* Copyright (c) 2016 The Linux Foundation. All rights reserved. 1/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 4 * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
534 } 534 }
535 535
536 if (a5xx_gpu->gpmu_bo) { 536 if (a5xx_gpu->gpmu_bo) {
537 if (a5xx_gpu->gpmu_bo) 537 if (a5xx_gpu->gpmu_iova)
538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
540 } 540 }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
860 .idle = a5xx_idle, 860 .idle = a5xx_idle,
861 .irq = a5xx_irq, 861 .irq = a5xx_irq,
862 .destroy = a5xx_destroy, 862 .destroy = a5xx_destroy,
863#ifdef CONFIG_DEBUG_FS
863 .show = a5xx_show, 864 .show = a5xx_show,
865#endif
864 }, 866 },
865 .get_timestamp = a5xx_get_timestamp, 867 .get_timestamp = a5xx_get_timestamp,
866}; 868};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5ae65426b4e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
418 return 0; 418 return 0;
419} 419}
420 420
421void adreno_gpu_cleanup(struct adreno_gpu *gpu) 421void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
422{ 422{
423 if (gpu->memptrs_bo) { 423 struct msm_gpu *gpu = &adreno_gpu->base;
424 if (gpu->memptrs) 424
425 msm_gem_put_vaddr(gpu->memptrs_bo); 425 if (adreno_gpu->memptrs_bo) {
426 if (adreno_gpu->memptrs)
427 msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
428
429 if (adreno_gpu->memptrs_iova)
430 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
431
432 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
433 }
434 release_firmware(adreno_gpu->pm4);
435 release_firmware(adreno_gpu->pfp);
426 436
427 if (gpu->memptrs_iova) 437 msm_gpu_cleanup(gpu);
428 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
429 438
430 drm_gem_object_unreference_unlocked(gpu->memptrs_bo); 439 if (gpu->aspace) {
440 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
441 iommu_ports, ARRAY_SIZE(iommu_ports));
442 msm_gem_address_space_destroy(gpu->aspace);
431 } 443 }
432 release_firmware(gpu->pm4);
433 release_firmware(gpu->pfp);
434 msm_gpu_cleanup(&gpu->base);
435} 444}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
171 } 171 }
172 } 172 }
173 } else { 173 } else {
174 msm_dsi_host_reset_phy(mdsi->host); 174 msm_dsi_host_reset_phy(msm_dsi->host);
175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); 175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
18#include <linux/hdmi.h> 18#include <linux/hdmi.h>
19#include "hdmi.h" 19#include "hdmi.h"
20 20
21
22/* Supported HDMI Audio channels */
23#define MSM_HDMI_AUDIO_CHANNEL_2 0
24#define MSM_HDMI_AUDIO_CHANNEL_4 1
25#define MSM_HDMI_AUDIO_CHANNEL_6 2
26#define MSM_HDMI_AUDIO_CHANNEL_8 3
27
28/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ 21/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
29static int nchannels[] = { 2, 4, 6, 8 }; 22static int nchannels[] = { 2, 4, 6, 8 };
30 23
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..238901987e00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
18#ifndef __MDP5_PIPE_H__ 18#ifndef __MDP5_PIPE_H__
19#define __MDP5_PIPE_H__ 19#define __MDP5_PIPE_H__
20 20
21#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ 21/* TODO: Add SSPP_MAX in mdp5.xml.h */
22#define SSPP_MAX (SSPP_CURSOR1 + 1)
22 23
23/* represents a hw pipe, which is dynamically assigned to a plane */ 24/* represents a hw pipe, which is dynamically assigned to a plane */
24struct mdp5_hw_pipe { 25struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
812 812
813 size = PAGE_ALIGN(size); 813 size = PAGE_ALIGN(size);
814 814
815 /* Disallow zero sized objects as they make the underlying
816 * infrastructure grumpy
817 */
818 if (size == 0)
819 return ERR_PTR(-EINVAL);
820
815 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); 821 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
816 if (ret) 822 if (ret)
817 goto fail; 823 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..af5b6ba4095b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
706 msm_ringbuffer_destroy(gpu->rb); 706 msm_ringbuffer_destroy(gpu->rb);
707 } 707 }
708 708
709 if (gpu->aspace)
710 msm_gem_address_space_destroy(gpu->aspace);
711
712 if (gpu->fctx) 709 if (gpu->fctx)
713 msm_fence_context_free(gpu->fctx); 710 msm_fence_context_free(gpu->fctx);
714} 711}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0b4440ffbeae..a9182d5e6011 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
995{ 995{
996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); 996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
997 __drm_atomic_helper_plane_destroy_state(&asyw->state); 997 __drm_atomic_helper_plane_destroy_state(&asyw->state);
998 dma_fence_put(asyw->state.fence);
999 kfree(asyw); 998 kfree(asyw);
1000} 999}
1001 1000
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1007 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) 1006 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1008 return NULL; 1007 return NULL;
1009 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); 1008 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1010 asyw->state.fence = NULL;
1011 asyw->interval = 1; 1009 asyw->interval = 1;
1012 asyw->sema = armw->sema; 1010 asyw->sema = armw->sema;
1013 asyw->ntfy = armw->ntfy; 1011 asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2036 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; 2034 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
2037 u32 hfrontp = mode->hsync_start - mode->hdisplay; 2035 u32 hfrontp = mode->hsync_start - mode->hdisplay;
2038 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; 2036 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
2037 u32 blankus;
2039 struct nv50_head_mode *m = &asyh->mode; 2038 struct nv50_head_mode *m = &asyh->mode;
2040 2039
2041 m->h.active = mode->htotal; 2040 m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2049 m->v.blanks = m->v.active - vfrontp - 1; 2048 m->v.blanks = m->v.active - vfrontp - 1;
2050 2049
2051 /*XXX: Safe underestimate, even "0" works */ 2050 /*XXX: Safe underestimate, even "0" works */
2052 m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; 2051 blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
2053 m->v.blankus *= 1000; 2052 blankus *= 1000;
2054 m->v.blankus /= mode->clock; 2053 blankus /= mode->clock;
2054 m->v.blankus = blankus;
2055 2055
2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2057 m->v.blank2e = m->v.active + m->v.synce + vbackp; 2057 m->v.blank2e = m->v.active + m->v.synce + vbackp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 273562dd6bbd..3b86a7399567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
714 .i2c = nv04_i2c_new, 714 .i2c = nv04_i2c_new,
715 .imem = nv40_instmem_new, 715 .imem = nv40_instmem_new,
716 .mc = nv44_mc_new, 716 .mc = nv44_mc_new,
717 .mmu = nv44_mmu_new, 717 .mmu = nv04_mmu_new,
718 .pci = nv40_pci_new, 718 .pci = nv40_pci_new,
719 .therm = nv40_therm_new, 719 .therm = nv40_therm_new,
720 .timer = nv41_timer_new, 720 .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
2271 .fifo = gp100_fifo_new, 2271 .fifo = gp100_fifo_new,
2272}; 2272};
2273 2273
2274static const struct nvkm_device_chip
2275nv137_chipset = {
2276 .name = "GP107",
2277 .bar = gf100_bar_new,
2278 .bios = nvkm_bios_new,
2279 .bus = gf100_bus_new,
2280 .devinit = gm200_devinit_new,
2281 .fb = gp102_fb_new,
2282 .fuse = gm107_fuse_new,
2283 .gpio = gk104_gpio_new,
2284 .i2c = gm200_i2c_new,
2285 .ibus = gm200_ibus_new,
2286 .imem = nv50_instmem_new,
2287 .ltc = gp100_ltc_new,
2288 .mc = gp100_mc_new,
2289 .mmu = gf100_mmu_new,
2290 .pci = gp100_pci_new,
2291 .pmu = gp102_pmu_new,
2292 .timer = gk20a_timer_new,
2293 .top = gk104_top_new,
2294 .ce[0] = gp102_ce_new,
2295 .ce[1] = gp102_ce_new,
2296 .ce[2] = gp102_ce_new,
2297 .ce[3] = gp102_ce_new,
2298 .disp = gp102_disp_new,
2299 .dma = gf119_dma_new,
2300 .fifo = gp100_fifo_new,
2301};
2302
2274static int 2303static int
2275nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2304nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2276 struct nvkm_notify *notify) 2305 struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2708 case 0x132: device->chip = &nv132_chipset; break; 2737 case 0x132: device->chip = &nv132_chipset; break;
2709 case 0x134: device->chip = &nv134_chipset; break; 2738 case 0x134: device->chip = &nv134_chipset; break;
2710 case 0x136: device->chip = &nv136_chipset; break; 2739 case 0x136: device->chip = &nv136_chipset; break;
2740 case 0x137: device->chip = &nv137_chipset; break;
2711 default: 2741 default:
2712 nvdev_error(device, "unknown chipset (%08x)\n", boot0); 2742 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2713 goto done; 2743 goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac915eaad..8a8895246d26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
198 } 198 }
199 199
200 if (type == 0x00000010) { 200 if (type == 0x00000010) {
201 if (!nv31_mpeg_mthd(mpeg, mthd, data)) 201 if (nv31_mpeg_mthd(mpeg, mthd, data))
202 show &= ~0x01000000; 202 show &= ~0x01000000;
203 } 203 }
204 } 204 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37e24b0..c3cf02ed468e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
172 } 172 }
173 173
174 if (type == 0x00000010) { 174 if (type == 0x00000010) {
175 if (!nv44_mpeg_mthd(subdev->device, mthd, data)) 175 if (nv44_mpeg_mthd(subdev->device, mthd, data))
176 show &= ~0x01000000; 176 show &= ~0x01000000;
177 } 177 }
178 } 178 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 684f1703aa5c..aaa3e80fecb4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
213 rbo->placement.num_busy_placement = 0; 213 rbo->placement.num_busy_placement = 0;
214 for (i = 0; i < rbo->placement.num_placement; i++) { 214 for (i = 0; i < rbo->placement.num_placement; i++) {
215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { 215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
216 if (rbo->placements[0].fpfn < fpfn) 216 if (rbo->placements[i].fpfn < fpfn)
217 rbo->placements[0].fpfn = fpfn; 217 rbo->placements[i].fpfn = fpfn;
218 } else { 218 } else {
219 rbo->placement.busy_placement = 219 rbo->placement.busy_placement =
220 &rbo->placements[i]; 220 &rbo->placements[i];
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 72e1588580a1..c7af9fdd20c7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2985,9 +2985,13 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2985 max_sclk = 75000; 2985 max_sclk = 75000;
2986 } 2986 }
2987 } else if (rdev->family == CHIP_OLAND) { 2987 } else if (rdev->family == CHIP_OLAND) {
2988 if ((rdev->pdev->device == 0x6604) && 2988 if ((rdev->pdev->revision == 0xC7) ||
2989 (rdev->pdev->subsystem_vendor == 0x1028) && 2989 (rdev->pdev->revision == 0x80) ||
2990 (rdev->pdev->subsystem_device == 0x066F)) { 2990 (rdev->pdev->revision == 0x81) ||
2991 (rdev->pdev->revision == 0x83) ||
2992 (rdev->pdev->revision == 0x87) ||
2993 (rdev->pdev->device == 0x6604) ||
2994 (rdev->pdev->device == 0x6605)) {
2991 max_sclk = 75000; 2995 max_sclk = 75000;
2992 } 2996 }
2993 } 2997 }
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
179 if (unlikely(ret != 0)) 179 if (unlikely(ret != 0))
180 goto out_err0; 180 goto out_err0;
181 181
182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
183 if (unlikely(ret != 0)) 183 if (unlikely(ret != 0))
184 goto out_err1; 184 goto out_err1;
185 185
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
318 318
319int ttm_ref_object_add(struct ttm_object_file *tfile, 319int ttm_ref_object_add(struct ttm_object_file *tfile,
320 struct ttm_base_object *base, 320 struct ttm_base_object *base,
321 enum ttm_ref_type ref_type, bool *existed) 321 enum ttm_ref_type ref_type, bool *existed,
322 bool require_existed)
322{ 323{
323 struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 324 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
324 struct ttm_ref_object *ref; 325 struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
345 } 346 }
346 347
347 rcu_read_unlock(); 348 rcu_read_unlock();
349 if (require_existed)
350 return -EPERM;
351
348 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 352 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
349 false, false); 353 false, false);
350 if (unlikely(ret != 0)) 354 if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
449 ttm_ref_object_release(&ref->kref); 453 ttm_ref_object_release(&ref->kref);
450 } 454 }
451 455
456 spin_unlock(&tfile->lock);
452 for (i = 0; i < TTM_REF_NUM; ++i) 457 for (i = 0; i < TTM_REF_NUM; ++i)
453 drm_ht_remove(&tfile->ref_hash[i]); 458 drm_ht_remove(&tfile->ref_hash[i]);
454 459
455 spin_unlock(&tfile->lock);
456 ttm_object_file_unref(&tfile); 460 ttm_object_file_unref(&tfile);
457} 461}
458EXPORT_SYMBOL(ttm_object_file_release); 462EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
529 533
530 *p_tdev = NULL; 534 *p_tdev = NULL;
531 535
532 spin_lock(&tdev->object_lock);
533 drm_ht_remove(&tdev->object_hash); 536 drm_ht_remove(&tdev->object_hash);
534 spin_unlock(&tdev->object_lock);
535 537
536 kfree(tdev); 538 kfree(tdev);
537} 539}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
635 prime = (struct ttm_prime_object *) dma_buf->priv; 637 prime = (struct ttm_prime_object *) dma_buf->priv;
636 base = &prime->base; 638 base = &prime->base;
637 *handle = base->hash.key; 639 *handle = base->hash.key;
638 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 640 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
639 641
640 dma_buf_put(dma_buf); 642 dma_buf_put(dma_buf);
641 643
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/prefetch.h> 16#include <linux/prefetch.h>
17#include <asm/unaligned.h>
17 18
18#include <drm/drmP.h> 19#include <drm/drmP.h>
19#include "udl_drv.h" 20#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
163 const u8 *const start = pixel; 164 const u8 *const start = pixel;
164 const uint16_t repeating_pixel_val16 = pixel_val16; 165 const uint16_t repeating_pixel_val16 = pixel_val16;
165 166
166 *(uint16_t *)cmd = cpu_to_be16(pixel_val16); 167 put_unaligned_be16(pixel_val16, cmd);
167 168
168 cmd += 2; 169 cmd += 2;
169 pixel += bpp; 170 pixel += bpp;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0c06844af445..9fcf05ca492b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
846 drm_atomic_helper_crtc_destroy_state(crtc, state); 846 drm_atomic_helper_crtc_destroy_state(crtc, state);
847} 847}
848 848
849static void
850vc4_crtc_reset(struct drm_crtc *crtc)
851{
852 if (crtc->state)
853 __drm_atomic_helper_crtc_destroy_state(crtc->state);
854
855 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
856 if (crtc->state)
857 crtc->state->crtc = crtc;
858}
859
849static const struct drm_crtc_funcs vc4_crtc_funcs = { 860static const struct drm_crtc_funcs vc4_crtc_funcs = {
850 .set_config = drm_atomic_helper_set_config, 861 .set_config = drm_atomic_helper_set_config,
851 .destroy = vc4_crtc_destroy, 862 .destroy = vc4_crtc_destroy,
@@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
853 .set_property = NULL, 864 .set_property = NULL,
854 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 865 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
855 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 866 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
856 .reset = drm_atomic_helper_crtc_reset, 867 .reset = vc4_crtc_reset,
857 .atomic_duplicate_state = vc4_crtc_duplicate_state, 868 .atomic_duplicate_state = vc4_crtc_duplicate_state,
858 .atomic_destroy_state = vc4_crtc_destroy_state, 869 .atomic_destroy_state = vc4_crtc_destroy_state,
859 .gamma_set = vc4_crtc_gamma_set, 870 .gamma_set = vc4_crtc_gamma_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6541dd8b82dc..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
538 struct vmw_fence_obj **p_fence) 538 struct vmw_fence_obj **p_fence)
539{ 539{
540 struct vmw_fence_obj *fence; 540 struct vmw_fence_obj *fence;
541 int ret; 541 int ret;
542 542
543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
544 if (unlikely(fence == NULL)) 544 if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
701} 701}
702 702
703 703
704/**
705 * vmw_fence_obj_lookup - Look up a user-space fence object
706 *
707 * @tfile: A struct ttm_object_file identifying the caller.
708 * @handle: A handle identifying the fence object.
709 * @return: A struct vmw_user_fence base ttm object on success or
710 * an error pointer on failure.
711 *
712 * The fence object is looked up and type-checked. The caller needs
713 * to have opened the fence object first, but since that happens on
714 * creation and fence objects aren't shareable, that's not an
715 * issue currently.
716 */
717static struct ttm_base_object *
718vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
719{
720 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
721
722 if (!base) {
723 pr_err("Invalid fence object handle 0x%08lx.\n",
724 (unsigned long)handle);
725 return ERR_PTR(-EINVAL);
726 }
727
728 if (base->refcount_release != vmw_user_fence_base_release) {
729 pr_err("Invalid fence object handle 0x%08lx.\n",
730 (unsigned long)handle);
731 ttm_base_object_unref(&base);
732 return ERR_PTR(-EINVAL);
733 }
734
735 return base;
736}
737
738
704int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, 739int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
705 struct drm_file *file_priv) 740 struct drm_file *file_priv)
706{ 741{
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
726 arg->kernel_cookie = jiffies + wait_timeout; 761 arg->kernel_cookie = jiffies + wait_timeout;
727 } 762 }
728 763
729 base = ttm_base_object_lookup(tfile, arg->handle); 764 base = vmw_fence_obj_lookup(tfile, arg->handle);
730 if (unlikely(base == NULL)) { 765 if (IS_ERR(base))
731 printk(KERN_ERR "Wait invalid fence object handle " 766 return PTR_ERR(base);
732 "0x%08lx.\n",
733 (unsigned long)arg->handle);
734 return -EINVAL;
735 }
736 767
737 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 768 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
738 769
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
771 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 802 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
772 struct vmw_private *dev_priv = vmw_priv(dev); 803 struct vmw_private *dev_priv = vmw_priv(dev);
773 804
774 base = ttm_base_object_lookup(tfile, arg->handle); 805 base = vmw_fence_obj_lookup(tfile, arg->handle);
775 if (unlikely(base == NULL)) { 806 if (IS_ERR(base))
776 printk(KERN_ERR "Fence signaled invalid fence object handle " 807 return PTR_ERR(base);
777 "0x%08lx.\n",
778 (unsigned long)arg->handle);
779 return -EINVAL;
780 }
781 808
782 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 809 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
783 fman = fman_from_fence(fence); 810 fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1024 (struct drm_vmw_fence_event_arg *) data; 1051 (struct drm_vmw_fence_event_arg *) data;
1025 struct vmw_fence_obj *fence = NULL; 1052 struct vmw_fence_obj *fence = NULL;
1026 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054 struct ttm_object_file *tfile = vmw_fp->tfile;
1027 struct drm_vmw_fence_rep __user *user_fence_rep = 1055 struct drm_vmw_fence_rep __user *user_fence_rep =
1028 (struct drm_vmw_fence_rep __user *)(unsigned long) 1056 (struct drm_vmw_fence_rep __user *)(unsigned long)
1029 arg->fence_rep; 1057 arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1037 */ 1065 */
1038 if (arg->handle) { 1066 if (arg->handle) {
1039 struct ttm_base_object *base = 1067 struct ttm_base_object *base =
1040 ttm_base_object_lookup_for_ref(dev_priv->tdev, 1068 vmw_fence_obj_lookup(tfile, arg->handle);
1041 arg->handle); 1069
1042 1070 if (IS_ERR(base))
1043 if (unlikely(base == NULL)) { 1071 return PTR_ERR(base);
1044 DRM_ERROR("Fence event invalid fence object handle " 1072
1045 "0x%08lx.\n",
1046 (unsigned long)arg->handle);
1047 return -EINVAL;
1048 }
1049 fence = &(container_of(base, struct vmw_user_fence, 1073 fence = &(container_of(base, struct vmw_user_fence,
1050 base)->fence); 1074 base)->fence);
1051 (void) vmw_fence_obj_reference(fence); 1075 (void) vmw_fence_obj_reference(fence);
1052 1076
1053 if (user_fence_rep != NULL) { 1077 if (user_fence_rep != NULL) {
1054 bool existed;
1055
1056 ret = ttm_ref_object_add(vmw_fp->tfile, base, 1078 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1057 TTM_REF_USAGE, &existed); 1079 TTM_REF_USAGE, NULL, false);
1058 if (unlikely(ret != 0)) { 1080 if (unlikely(ret != 0)) {
1059 DRM_ERROR("Failed to reference a fence " 1081 DRM_ERROR("Failed to reference a fence "
1060 "object.\n"); 1082 "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1097 return 0; 1119 return 0;
1098out_no_create: 1120out_no_create:
1099 if (user_fence_rep != NULL) 1121 if (user_fence_rep != NULL)
1100 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 1122 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1101 handle, TTM_REF_USAGE);
1102out_no_ref_obj: 1123out_no_ref_obj:
1103 vmw_fence_obj_unreference(&fence); 1124 vmw_fence_obj_unreference(&fence);
1104 return ret; 1125 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
114 param->value = dev_priv->has_dx; 114 param->value = dev_priv->has_dx;
115 break; 115 break;
116 default: 116 default:
117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
118 param->param);
119 return -EINVAL; 117 return -EINVAL;
120 } 118 }
121 119
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
186 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 184 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
187 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 185 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
188 186
189 if (unlikely(arg->pad64 != 0)) { 187 if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
190 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 188 DRM_ERROR("Illegal GET_3D_CAP argument.\n");
191 return -EINVAL; 189 return -EINVAL;
192 } 190 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 65b3f0369636..bf23153d4f55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
589 return ret; 589 return ret;
590 590
591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
592 TTM_REF_SYNCCPU_WRITE, &existed); 592 TTM_REF_SYNCCPU_WRITE, &existed, false);
593 if (ret != 0 || existed) 593 if (ret != 0 || existed)
594 ttm_bo_synccpu_write_release(&user_bo->dma.base); 594 ttm_bo_synccpu_write_release(&user_bo->dma.base);
595 595
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
773 773
774 *handle = user_bo->prime.base.hash.key; 774 *handle = user_bo->prime.base.hash.key;
775 return ttm_ref_object_add(tfile, &user_bo->prime.base, 775 return ttm_ref_object_add(tfile, &user_bo->prime.base,
776 TTM_REF_USAGE, NULL); 776 TTM_REF_USAGE, NULL, false);
777} 777}
778 778
779/* 779/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9b9757..05fa092c942b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
713 128; 713 128;
714 714
715 num_sizes = 0; 715 num_sizes = 0;
716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
717 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
718 return -EINVAL;
717 num_sizes += req->mip_levels[i]; 719 num_sizes += req->mip_levels[i];
720 }
718 721
719 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * 722 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
720 DRM_VMW_MAX_MIP_LEVELS) 723 num_sizes == 0)
721 return -EINVAL; 724 return -EINVAL;
722 725
723 size = vmw_user_surface_size + 128 + 726 size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
891 uint32_t handle; 894 uint32_t handle;
892 struct ttm_base_object *base; 895 struct ttm_base_object *base;
893 int ret; 896 int ret;
897 bool require_exist = false;
894 898
895 if (handle_type == DRM_VMW_HANDLE_PRIME) { 899 if (handle_type == DRM_VMW_HANDLE_PRIME) {
896 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 900 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
897 if (unlikely(ret != 0)) 901 if (unlikely(ret != 0))
898 return ret; 902 return ret;
899 } else { 903 } else {
900 if (unlikely(drm_is_render_client(file_priv))) { 904 if (unlikely(drm_is_render_client(file_priv)))
901 DRM_ERROR("Render client refused legacy " 905 require_exist = true;
902 "surface reference.\n"); 906
903 return -EACCES;
904 }
905 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { 907 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
906 DRM_ERROR("Locked master refused legacy " 908 DRM_ERROR("Locked master refused legacy "
907 "surface reference.\n"); 909 "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
929 931
930 /* 932 /*
931 * Make sure the surface creator has the same 933 * Make sure the surface creator has the same
932 * authenticating master. 934 * authenticating master, or is already registered with us.
933 */ 935 */
934 if (drm_is_primary_client(file_priv) && 936 if (drm_is_primary_client(file_priv) &&
935 user_srf->master != file_priv->master) { 937 user_srf->master != file_priv->master)
936 DRM_ERROR("Trying to reference surface outside of" 938 require_exist = true;
937 " master domain.\n");
938 ret = -EACCES;
939 goto out_bad_resource;
940 }
941 939
942 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 940 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
941 require_exist);
943 if (unlikely(ret != 0)) { 942 if (unlikely(ret != 0)) {
944 DRM_ERROR("Could not add a reference to a surface.\n"); 943 DRM_ERROR("Could not add a reference to a surface.\n");
945 goto out_bad_resource; 944 goto out_bad_resource;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1aeb80e52424..8c54cb8f5d6d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,11 +175,11 @@ config HID_CHERRY
175 Support for Cherry Cymotion keyboard. 175 Support for Cherry Cymotion keyboard.
176 176
177config HID_CHICONY 177config HID_CHICONY
178 tristate "Chicony Tactical pad" 178 tristate "Chicony devices"
179 depends on HID 179 depends on HID
180 default !EXPERT 180 default !EXPERT
181 ---help--- 181 ---help---
182 Support for Chicony Tactical pad. 182 Support for Chicony Tactical pad and special keys on Chicony keyboards.
183 183
184config HID_CORSAIR 184config HID_CORSAIR
185 tristate "Corsair devices" 185 tristate "Corsair devices"
@@ -190,6 +190,7 @@ config HID_CORSAIR
190 190
191 Supported devices: 191 Supported devices:
192 - Vengeance K90 192 - Vengeance K90
193 - Scimitar PRO RGB
193 194
194config HID_PRODIKEYS 195config HID_PRODIKEYS
195 tristate "Prodikeys PC-MIDI Keyboard support" 196 tristate "Prodikeys PC-MIDI Keyboard support"
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index bc3cec199fee..f04ed9aabc3f 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
89 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
89 { } 90 { }
90}; 91};
91MODULE_DEVICE_TABLE(hid, ch_devices); 92MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e9e87d337446..d162f0dc76e3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
819 hid->group = HID_GROUP_WACOM; 819 hid->group = HID_GROUP_WACOM;
820 break; 820 break;
821 case USB_VENDOR_ID_SYNAPTICS: 821 case USB_VENDOR_ID_SYNAPTICS:
822 if (hid->group == HID_GROUP_GENERIC || 822 if (hid->group == HID_GROUP_GENERIC)
823 hid->group == HID_GROUP_MULTITOUCH_WIN_8)
824 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 823 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
825 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 824 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
826 /* 825 /*
@@ -1870,6 +1869,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1869 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1871 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1873 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1874 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1874 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1910,6 +1910,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
1912 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1912 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
1913 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
1913 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1914 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1914 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1915 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
1915 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1916 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -2094,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2094 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2096 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2097 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
@@ -2110,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2110 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2111 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2112 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2113 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2114 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2118 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index c0303f61c26a..9ba5d98a1180 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -3,8 +3,10 @@
3 * 3 *
4 * Supported devices: 4 * Supported devices:
5 * - Vengeance K90 Keyboard 5 * - Vengeance K90 Keyboard
6 * - Scimitar PRO RGB Gaming Mouse
6 * 7 *
7 * Copyright (c) 2015 Clement Vuchener 8 * Copyright (c) 2015 Clement Vuchener
9 * Copyright (c) 2017 Oscar Campos
8 */ 10 */
9 11
10/* 12/*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
670 return 0; 672 return 0;
671} 673}
672 674
675/*
676 * The report descriptor of Corsair Scimitar RGB Pro gaming mouse is
677 * non parseable as they define two consecutive Logical Minimum for
678 * the Usage Page (Consumer) in rdescs bytes 75 and 77 being 77 0x16
679 * that should be obviousy 0x26 for Logical Magimum of 16 bits. This
680 * prevents poper parsing of the report descriptor due Logical
681 * Minimum being larger than Logical Maximum.
682 *
683 * This driver fixes the report descriptor for:
684 * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
685 */
686
687static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
688 unsigned int *rsize)
689{
690 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
691
692 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
693 /*
694 * Corsair Scimitar RGB Pro report descriptor is broken and
695 * defines two different Logical Minimum for the Consumer
696 * Application. The byte 77 should be a 0x26 defining a 16
697 * bits integer for the Logical Maximum but it is a 0x16
698 * instead (Logical Minimum)
699 */
700 switch (hdev->product) {
701 case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
702 if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
703 && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
704 hid_info(hdev, "Fixing up report descriptor\n");
705 rdesc[77] = 0x26;
706 }
707 break;
708 }
709
710 }
711 return rdesc;
712}
713
673static const struct hid_device_id corsair_devices[] = { 714static const struct hid_device_id corsair_devices[] = {
674 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), 715 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
675 .driver_data = CORSAIR_USE_K90_MACRO | 716 .driver_data = CORSAIR_USE_K90_MACRO |
676 CORSAIR_USE_K90_BACKLIGHT }, 717 CORSAIR_USE_K90_BACKLIGHT },
718 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
719 USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
677 {} 720 {}
678}; 721};
679 722
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
686 .event = corsair_event, 729 .event = corsair_event,
687 .remove = corsair_remove, 730 .remove = corsair_remove,
688 .input_mapping = corsair_input_mapping, 731 .input_mapping = corsair_input_mapping,
732 .report_fixup = corsair_mouse_report_fixup,
689}; 733};
690 734
691module_hid_driver(corsair_driver); 735module_hid_driver(corsair_driver);
692 736
693MODULE_LICENSE("GPL"); 737MODULE_LICENSE("GPL");
738/* Original K90 driver author */
694MODULE_AUTHOR("Clement Vuchener"); 739MODULE_AUTHOR("Clement Vuchener");
740/* Scimitar PRO RGB driver author */
741MODULE_AUTHOR("Oscar Campos");
695MODULE_DESCRIPTION("HID driver for Corsair devices"); 742MODULE_DESCRIPTION("HID driver for Corsair devices");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 86c95d30ac80..b26c030926c1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -278,6 +278,9 @@
278#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 278#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
279#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 279#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15
280#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 280#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
281#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38
282#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39
283#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e
281 284
282#define USB_VENDOR_ID_CREATIVELABS 0x041e 285#define USB_VENDOR_ID_CREATIVELABS 0x041e
283#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c 286#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
@@ -557,6 +560,7 @@
557 560
558#define USB_VENDOR_ID_JESS 0x0c45 561#define USB_VENDOR_ID_JESS 0x0c45
559#define USB_DEVICE_ID_JESS_YUREX 0x1010 562#define USB_DEVICE_ID_JESS_YUREX 0x1010
563#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
560 564
561#define USB_VENDOR_ID_JESS2 0x0f30 565#define USB_VENDOR_ID_JESS2 0x0f30
562#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 566#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
@@ -1024,6 +1028,9 @@
1024#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
1025#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d 1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
1026 1030
1031#define USB_VENDOR_ID_UGEE 0x28bd
1032#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
1033
1027#define USB_VENDOR_ID_UNITEC 0x227d 1034#define USB_VENDOR_ID_UNITEC 0x227d
1028#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 1035#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
1029#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 1036#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
@@ -1078,6 +1085,7 @@
1078 1085
1079#define USB_VENDOR_ID_XIN_MO 0x16c0 1086#define USB_VENDOR_ID_XIN_MO 0x16c0
1080#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 1087#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
1088#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
1081 1089
1082#define USB_VENDOR_ID_XIROKU 0x1477 1090#define USB_VENDOR_ID_XIROKU 0x1477
1083#define USB_DEVICE_ID_XIROKU_SPX 0x1006 1091#define USB_DEVICE_ID_XIROKU_SPX 0x1006
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index f405b07d0381..740996f9bdd4 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2632,6 +2632,8 @@ err_stop:
2632 sony_leds_remove(sc); 2632 sony_leds_remove(sc);
2633 if (sc->quirks & SONY_BATTERY_SUPPORT) 2633 if (sc->quirks & SONY_BATTERY_SUPPORT)
2634 sony_battery_remove(sc); 2634 sony_battery_remove(sc);
2635 if (sc->touchpad)
2636 sony_unregister_touchpad(sc);
2635 sony_cancel_work_sync(sc); 2637 sony_cancel_work_sync(sc);
2636 kfree(sc->output_report_dmabuf); 2638 kfree(sc->output_report_dmabuf);
2637 sony_remove_dev_list(sc); 2639 sony_remove_dev_list(sc);
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 1509d7287ff3..e3e6e5c893cc 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
977 } 977 }
978 break; 978 break;
979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: 979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
980 case USB_DEVICE_ID_UGEE_TABLET_EX07S:
980 /* If this is the pen interface */ 981 /* If this is the pen interface */
981 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 982 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
982 rc = uclogic_tablet_enable(hdev); 983 rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
1069 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 1071 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
1071 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 1072 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
1073 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
1072 { } 1074 { }
1073}; 1075};
1074MODULE_DEVICE_TABLE(hid, uclogic_devices); 1076MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227a7e61..9ad7731d2e10 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
46 46
47static const struct hid_device_id xinmo_devices[] = { 47static const struct hid_device_id xinmo_devices[] = {
48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
49 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
49 { } 50 { }
50}; 51};
51 52
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index d6847a664446..a69a3c88ab29 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, 80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, 81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
84 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 86 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
85 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 88 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index be8f7e2a026f..e2666ef84dc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2165 2165
2166 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2166 wacom_update_name(wacom, wireless ? " (WL)" : "");
2167 2167
2168 /* pen only Bamboo neither support touch nor pad */
2169 if ((features->type == BAMBOO_PEN) &&
2170 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2171 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2172 error = -ENODEV;
2173 goto fail;
2174 }
2175
2168 error = wacom_add_shared_data(hdev); 2176 error = wacom_add_shared_data(hdev);
2169 if (error) 2177 if (error)
2170 goto fail; 2178 goto fail;
@@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2208 /* touch only Bamboo doesn't support pen */ 2216 /* touch only Bamboo doesn't support pen */
2209 if ((features->type == BAMBOO_TOUCH) && 2217 if ((features->type == BAMBOO_TOUCH) &&
2210 (features->device_type & WACOM_DEVICETYPE_PEN)) { 2218 (features->device_type & WACOM_DEVICETYPE_PEN)) {
2211 error = -ENODEV; 2219 cancel_delayed_work_sync(&wacom->init_work);
2212 goto fail_quirks; 2220 _wacom_query_tablet_data(wacom);
2213 }
2214
2215 /* pen only Bamboo neither support touch nor pad */
2216 if ((features->type == BAMBOO_PEN) &&
2217 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2218 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2219 error = -ENODEV; 2221 error = -ENODEV;
2220 goto fail_quirks; 2222 goto fail_quirks;
2221 } 2223 }
@@ -2579,7 +2581,9 @@ static void wacom_remove(struct hid_device *hdev)
2579 2581
2580 /* make sure we don't trigger the LEDs */ 2582 /* make sure we don't trigger the LEDs */
2581 wacom_led_groups_release(wacom); 2583 wacom_led_groups_release(wacom);
2582 wacom_release_resources(wacom); 2584
2585 if (wacom->wacom_wac.features.type != REMOTE)
2586 wacom_release_resources(wacom);
2583 2587
2584 hid_set_drvdata(hdev, NULL); 2588 hid_set_drvdata(hdev, NULL);
2585} 2589}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 4aa3de9f1163..c68ac65db7ff 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); 1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); 1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); 1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
1962 input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1962 if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
1963 input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1963 input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
1964 input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
1965 }
1964 break; 1966 break;
1965 case WACOM_HID_WD_FINGERWHEEL: 1967 case WACOM_HID_WD_FINGERWHEEL:
1966 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); 1968 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
@@ -2004,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2004 return; 2006 return;
2005 case HID_DG_TOOLSERIALNUMBER: 2007 case HID_DG_TOOLSERIALNUMBER:
2006 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2008 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
2007 wacom_wac->serial[0] |= value; 2009 wacom_wac->serial[0] |= (__u32)value;
2008 return; 2010 return;
2009 case WACOM_HID_WD_SENSE: 2011 case WACOM_HID_WD_SENSE:
2010 wacom_wac->hid_data.sense_state = value; 2012 wacom_wac->hid_data.sense_state = value;
@@ -2174,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
2174 wacom_wac->hid_data.cc_index = field->index; 2176 wacom_wac->hid_data.cc_index = field->index;
2175 wacom_wac->hid_data.cc_value_index = usage->usage_index; 2177 wacom_wac->hid_data.cc_value_index = usage->usage_index;
2176 break; 2178 break;
2179 case HID_DG_CONTACTID:
2180 if ((field->logical_maximum - field->logical_minimum) < touch_max) {
2181 /*
2182 * The HID descriptor for G11 sensors leaves logical
2183 * maximum set to '1' despite it being a multitouch
2184 * device. Override to a sensible number.
2185 */
2186 field->logical_maximum = 255;
2187 }
2188 break;
2177 } 2189 }
2178} 2190}
2179 2191
@@ -4197,10 +4209,10 @@ static const struct wacom_features wacom_features_0x343 =
4197 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 4209 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
4198static const struct wacom_features wacom_features_0x360 = 4210static const struct wacom_features wacom_features_0x360 =
4199 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, 4211 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
4200 INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4212 INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
4201static const struct wacom_features wacom_features_0x361 = 4213static const struct wacom_features wacom_features_0x361 =
4202 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, 4214 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
4203 INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4215 INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
4204 4216
4205static const struct wacom_features wacom_features_HID_ANY_ID = 4217static const struct wacom_features wacom_features_HID_ANY_ID =
4206 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; 4218 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bd0d1988feb2..321b8833fa6f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
502 502
503 wait_for_completion(&info->waitevent); 503 wait_for_completion(&info->waitevent);
504 504
505 if (channel->rescind) {
506 ret = -ENODEV;
507 goto post_msg_err;
508 }
509
510post_msg_err: 505post_msg_err:
506 /*
507 * If the channel has been rescinded;
508 * we will be awakened by the rescind
509 * handler; set the error code to zero so we don't leak memory.
510 */
511 if (channel->rescind)
512 ret = 0;
513
511 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 514 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
512 list_del(&info->msglistentry); 515 list_del(&info->msglistentry);
513 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 516 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
530 int ret; 533 int ret;
531 534
532 /* 535 /*
533 * vmbus_on_event(), running in the tasklet, can race 536 * vmbus_on_event(), running in the per-channel tasklet, can race
534 * with vmbus_close_internal() in the case of SMP guest, e.g., when 537 * with vmbus_close_internal() in the case of SMP guest, e.g., when
535 * the former is accessing channel->inbound.ring_buffer, the latter 538 * the former is accessing channel->inbound.ring_buffer, the latter
536 * could be freeing the ring_buffer pages. 539 * could be freeing the ring_buffer pages, so here we must stop it
537 * 540 * first.
538 * To resolve the race, we can serialize them by disabling the
539 * tasklet when the latter is running here.
540 */ 541 */
541 hv_event_tasklet_disable(channel); 542 tasklet_disable(&channel->callback_event);
542 543
543 /* 544 /*
544 * In case a device driver's probe() fails (e.g., 545 * In case a device driver's probe() fails (e.g.,
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
605 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 606 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
606 607
607out: 608out:
608 hv_event_tasklet_enable(channel);
609
610 return ret; 609 return ret;
611} 610}
612 611
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f33465d78a02..fbcb06352308 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
350static void free_channel(struct vmbus_channel *channel) 350static void free_channel(struct vmbus_channel *channel)
351{ 351{
352 tasklet_kill(&channel->callback_event); 352 tasklet_kill(&channel->callback_event);
353 kfree(channel); 353
354 kfree_rcu(channel, rcu);
354} 355}
355 356
356static void percpu_channel_enq(void *arg) 357static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
359 struct hv_per_cpu_context *hv_cpu 360 struct hv_per_cpu_context *hv_cpu
360 = this_cpu_ptr(hv_context.cpu_context); 361 = this_cpu_ptr(hv_context.cpu_context);
361 362
362 list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); 363 list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
363} 364}
364 365
365static void percpu_channel_deq(void *arg) 366static void percpu_channel_deq(void *arg)
366{ 367{
367 struct vmbus_channel *channel = arg; 368 struct vmbus_channel *channel = arg;
368 369
369 list_del(&channel->percpu_list); 370 list_del_rcu(&channel->percpu_list);
370} 371}
371 372
372 373
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
381 true); 382 true);
382} 383}
383 384
384void hv_event_tasklet_disable(struct vmbus_channel *channel)
385{
386 tasklet_disable(&channel->callback_event);
387}
388
389void hv_event_tasklet_enable(struct vmbus_channel *channel)
390{
391 tasklet_enable(&channel->callback_event);
392
393 /* In case there is any pending event */
394 tasklet_schedule(&channel->callback_event);
395}
396
397void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 385void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
398{ 386{
399 unsigned long flags; 387 unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
402 BUG_ON(!channel->rescind); 390 BUG_ON(!channel->rescind);
403 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 391 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
404 392
405 hv_event_tasklet_disable(channel);
406 if (channel->target_cpu != get_cpu()) { 393 if (channel->target_cpu != get_cpu()) {
407 put_cpu(); 394 put_cpu();
408 smp_call_function_single(channel->target_cpu, 395 smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
411 percpu_channel_deq(channel); 398 percpu_channel_deq(channel);
412 put_cpu(); 399 put_cpu();
413 } 400 }
414 hv_event_tasklet_enable(channel);
415 401
416 if (channel->primary_channel == NULL) { 402 if (channel->primary_channel == NULL) {
417 list_del(&channel->listentry); 403 list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
505 491
506 init_vp_index(newchannel, dev_type); 492 init_vp_index(newchannel, dev_type);
507 493
508 hv_event_tasklet_disable(newchannel);
509 if (newchannel->target_cpu != get_cpu()) { 494 if (newchannel->target_cpu != get_cpu()) {
510 put_cpu(); 495 put_cpu();
511 smp_call_function_single(newchannel->target_cpu, 496 smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
515 percpu_channel_enq(newchannel); 500 percpu_channel_enq(newchannel);
516 put_cpu(); 501 put_cpu();
517 } 502 }
518 hv_event_tasklet_enable(newchannel);
519 503
520 /* 504 /*
521 * This state is used to indicate a successful open 505 * This state is used to indicate a successful open
@@ -565,7 +549,6 @@ err_deq_chan:
565 list_del(&newchannel->listentry); 549 list_del(&newchannel->listentry);
566 mutex_unlock(&vmbus_connection.channel_mutex); 550 mutex_unlock(&vmbus_connection.channel_mutex);
567 551
568 hv_event_tasklet_disable(newchannel);
569 if (newchannel->target_cpu != get_cpu()) { 552 if (newchannel->target_cpu != get_cpu()) {
570 put_cpu(); 553 put_cpu();
571 smp_call_function_single(newchannel->target_cpu, 554 smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ err_deq_chan:
574 percpu_channel_deq(newchannel); 557 percpu_channel_deq(newchannel);
575 put_cpu(); 558 put_cpu();
576 } 559 }
577 hv_event_tasklet_enable(newchannel);
578 560
579 vmbus_release_relid(newchannel->offermsg.child_relid); 561 vmbus_release_relid(newchannel->offermsg.child_relid);
580 562
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
814 /* Allocate the channel object and save this offer. */ 796 /* Allocate the channel object and save this offer. */
815 newchannel = alloc_channel(); 797 newchannel = alloc_channel();
816 if (!newchannel) { 798 if (!newchannel) {
799 vmbus_release_relid(offer->child_relid);
817 pr_err("Unable to allocate channel object\n"); 800 pr_err("Unable to allocate channel object\n");
818 return; 801 return;
819 } 802 }
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 9aee6014339d..a5596a642ed0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
71static const char fcopy_devname[] = "vmbus/hv_fcopy"; 71static const char fcopy_devname[] = "vmbus/hv_fcopy";
72static u8 *recv_buffer; 72static u8 *recv_buffer;
73static struct hvutil_transport *hvt; 73static struct hvutil_transport *hvt;
74static struct completion release_event;
75/* 74/*
76 * This state maintains the version number registered by the daemon. 75 * This state maintains the version number registered by the daemon.
77 */ 76 */
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
331 330
332 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 331 if (cancel_delayed_work_sync(&fcopy_timeout_work))
333 fcopy_respond_to_host(HV_E_FAIL); 332 fcopy_respond_to_host(HV_E_FAIL);
334 complete(&release_event);
335} 333}
336 334
337int hv_fcopy_init(struct hv_util_service *srv) 335int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
339 recv_buffer = srv->recv_buffer; 337 recv_buffer = srv->recv_buffer;
340 fcopy_transaction.recv_channel = srv->channel; 338 fcopy_transaction.recv_channel = srv->channel;
341 339
342 init_completion(&release_event);
343 /* 340 /*
344 * When this driver loads, the user level daemon that 341 * When this driver loads, the user level daemon that
345 * processes the host requests may not yet be running. 342 * processes the host requests may not yet be running.
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void)
361 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 358 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
362 cancel_delayed_work_sync(&fcopy_timeout_work); 359 cancel_delayed_work_sync(&fcopy_timeout_work);
363 hvutil_transport_destroy(hvt); 360 hvutil_transport_destroy(hvt);
364 wait_for_completion(&release_event);
365} 361}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index de263712e247..a1adfe2cfb34 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
101static const char kvp_devname[] = "vmbus/hv_kvp"; 101static const char kvp_devname[] = "vmbus/hv_kvp";
102static u8 *recv_buffer; 102static u8 *recv_buffer;
103static struct hvutil_transport *hvt; 103static struct hvutil_transport *hvt;
104static struct completion release_event;
105/* 104/*
106 * Register the kernel component with the user-level daemon. 105 * Register the kernel component with the user-level daemon.
107 * As part of this registration, pass the LIC version number. 106 * As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
714 if (cancel_delayed_work_sync(&kvp_timeout_work)) 713 if (cancel_delayed_work_sync(&kvp_timeout_work))
715 kvp_respond_to_host(NULL, HV_E_FAIL); 714 kvp_respond_to_host(NULL, HV_E_FAIL);
716 kvp_transaction.state = HVUTIL_DEVICE_INIT; 715 kvp_transaction.state = HVUTIL_DEVICE_INIT;
717 complete(&release_event);
718} 716}
719 717
720int 718int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
723 recv_buffer = srv->recv_buffer; 721 recv_buffer = srv->recv_buffer;
724 kvp_transaction.recv_channel = srv->channel; 722 kvp_transaction.recv_channel = srv->channel;
725 723
726 init_completion(&release_event);
727 /* 724 /*
728 * When this driver loads, the user level daemon that 725 * When this driver loads, the user level daemon that
729 * processes the host requests may not yet be running. 726 * processes the host requests may not yet be running.
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void)
747 cancel_delayed_work_sync(&kvp_timeout_work); 744 cancel_delayed_work_sync(&kvp_timeout_work);
748 cancel_work_sync(&kvp_sendkey_work); 745 cancel_work_sync(&kvp_sendkey_work);
749 hvutil_transport_destroy(hvt); 746 hvutil_transport_destroy(hvt);
750 wait_for_completion(&release_event);
751} 747}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bcc03f0748d6..e659d1b94a57 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -79,7 +79,6 @@ static int dm_reg_value;
79static const char vss_devname[] = "vmbus/hv_vss"; 79static const char vss_devname[] = "vmbus/hv_vss";
80static __u8 *recv_buffer; 80static __u8 *recv_buffer;
81static struct hvutil_transport *hvt; 81static struct hvutil_transport *hvt;
82static struct completion release_event;
83 82
84static void vss_timeout_func(struct work_struct *dummy); 83static void vss_timeout_func(struct work_struct *dummy);
85static void vss_handle_request(struct work_struct *dummy); 84static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
361 if (cancel_delayed_work_sync(&vss_timeout_work)) 360 if (cancel_delayed_work_sync(&vss_timeout_work))
362 vss_respond_to_host(HV_E_FAIL); 361 vss_respond_to_host(HV_E_FAIL);
363 vss_transaction.state = HVUTIL_DEVICE_INIT; 362 vss_transaction.state = HVUTIL_DEVICE_INIT;
364 complete(&release_event);
365} 363}
366 364
367int 365int
368hv_vss_init(struct hv_util_service *srv) 366hv_vss_init(struct hv_util_service *srv)
369{ 367{
370 init_completion(&release_event);
371 if (vmbus_proto_version < VERSION_WIN8_1) { 368 if (vmbus_proto_version < VERSION_WIN8_1) {
372 pr_warn("Integration service 'Backup (volume snapshot)'" 369 pr_warn("Integration service 'Backup (volume snapshot)'"
373 " not supported on this host version.\n"); 370 " not supported on this host version.\n");
@@ -400,5 +397,4 @@ void hv_vss_deinit(void)
400 cancel_delayed_work_sync(&vss_timeout_work); 397 cancel_delayed_work_sync(&vss_timeout_work);
401 cancel_work_sync(&vss_handle_request_work); 398 cancel_work_sync(&vss_handle_request_work);
402 hvutil_transport_destroy(hvt); 399 hvutil_transport_destroy(hvt);
403 wait_for_completion(&release_event);
404} 400}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3042eaa13062..186b10083c55 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
590 if (!hyperv_cs) 590 if (!hyperv_cs)
591 return -ENODEV; 591 return -ENODEV;
592 592
593 spin_lock_init(&host_ts.lock);
594
593 INIT_WORK(&wrk.work, hv_set_host_time); 595 INIT_WORK(&wrk.work, hv_set_host_time);
594 596
595 /* 597 /*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a9515267..4402a71e23f7 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
182 * connects back. 182 * connects back.
183 */ 183 */
184 hvt_reset(hvt); 184 hvt_reset(hvt);
185 mutex_unlock(&hvt->lock);
186 185
187 if (mode_old == HVUTIL_TRANSPORT_DESTROY) 186 if (mode_old == HVUTIL_TRANSPORT_DESTROY)
188 hvt_transport_free(hvt); 187 complete(&hvt->release);
188
189 mutex_unlock(&hvt->lock);
189 190
190 return 0; 191 return 0;
191} 192}
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
304 305
305 init_waitqueue_head(&hvt->outmsg_q); 306 init_waitqueue_head(&hvt->outmsg_q);
306 mutex_init(&hvt->lock); 307 mutex_init(&hvt->lock);
308 init_completion(&hvt->release);
307 309
308 spin_lock(&hvt_list_lock); 310 spin_lock(&hvt_list_lock);
309 list_add(&hvt->list, &hvt_list); 311 list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
351 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 353 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
352 cn_del_callback(&hvt->cn_id); 354 cn_del_callback(&hvt->cn_id);
353 355
354 if (mode_old != HVUTIL_TRANSPORT_CHARDEV) 356 if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
355 hvt_transport_free(hvt); 357 wait_for_completion(&hvt->release);
358
359 hvt_transport_free(hvt);
356} 360}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f5225c3e6..79afb626e166 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@ struct hvutil_transport {
41 int outmsg_len; /* its length */ 41 int outmsg_len; /* its length */
42 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 42 wait_queue_head_t outmsg_q; /* poll/read wait queue */
43 struct mutex lock; /* protects struct members */ 43 struct mutex lock; /* protects struct members */
44 struct completion release; /* synchronize with fd release */
44}; 45};
45 46
46struct hvutil_transport *hvutil_transport_init(const char *name, 47struct hvutil_transport *hvutil_transport_init(const char *name,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index da6b59ba5940..8370b9dc6037 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
939 if (relid == 0) 939 if (relid == 0)
940 continue; 940 continue;
941 941
942 rcu_read_lock();
943
942 /* Find channel based on relid */ 944 /* Find channel based on relid */
943 list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { 945 list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
944 if (channel->offermsg.child_relid != relid) 946 if (channel->offermsg.child_relid != relid)
945 continue; 947 continue;
946 948
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
956 tasklet_schedule(&channel->callback_event); 958 tasklet_schedule(&channel->callback_event);
957 } 959 }
958 } 960 }
961
962 rcu_read_unlock();
959 } 963 }
960} 964}
961 965
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87963e0..975c43d446f8 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
646 else 646 else
647 err = atk_read_value_new(sensor, value); 647 err = atk_read_value_new(sensor, value);
648 648
649 if (err)
650 return err;
651
649 sensor->is_valid = true; 652 sensor->is_valid = true;
650 sensor->last_updated = jiffies; 653 sensor->last_updated = jiffies;
651 sensor->cached_value = *value; 654 sensor->cached_value = *value;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index efb01c247e2d..4dfc7238313e 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void)
3198{ 3198{
3199 int sioaddr[2] = { REG_2E, REG_4E }; 3199 int sioaddr[2] = { REG_2E, REG_4E };
3200 struct it87_sio_data sio_data; 3200 struct it87_sio_data sio_data;
3201 unsigned short isa_address; 3201 unsigned short isa_address[2];
3202 bool found = false; 3202 bool found = false;
3203 int i, err; 3203 int i, err;
3204 3204
@@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void)
3208 3208
3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { 3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
3210 memset(&sio_data, 0, sizeof(struct it87_sio_data)); 3210 memset(&sio_data, 0, sizeof(struct it87_sio_data));
3211 isa_address = 0; 3211 isa_address[i] = 0;
3212 err = it87_find(sioaddr[i], &isa_address, &sio_data); 3212 err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
3213 if (err || isa_address == 0) 3213 if (err || isa_address[i] == 0)
3214 continue; 3214 continue;
3215 /*
3216 * Don't register second chip if its ISA address matches
3217 * the first chip's ISA address.
3218 */
3219 if (i && isa_address[i] == isa_address[0])
3220 break;
3215 3221
3216 err = it87_device_add(i, isa_address, &sio_data); 3222 err = it87_device_add(i, isa_address[i], &sio_data);
3217 if (err) 3223 if (err)
3218 goto exit_dev_unregister; 3224 goto exit_dev_unregister;
3225
3219 found = true; 3226 found = true;
3227
3228 /*
3229 * IT8705F may respond on both SIO addresses.
3230 * Stop probing after finding one.
3231 */
3232 if (sio_data.type == it87)
3233 break;
3220 } 3234 }
3221 3235
3222 if (!found) { 3236 if (!found) {
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275978f9..281491cca510 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
311 data->pwm[channel] = val << 8; 311 data->pwm[channel] = val << 8;
312 err = i2c_smbus_write_word_swapped(client, 312 err = i2c_smbus_write_word_swapped(client,
313 MAX31790_REG_PWMOUT(channel), 313 MAX31790_REG_PWMOUT(channel),
314 val); 314 data->pwm[channel]);
315 break; 315 break;
316 case hwmon_pwm_enable: 316 case hwmon_pwm_enable:
317 fan_config = data->fan_config[channel]; 317 fan_config = data->fan_config[channel];
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index cdd9b3b26195..7563eceeaaea 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
221 else 221 else
222 intel_th_trace_enable(thdev); 222 intel_th_trace_enable(thdev);
223 223
224 if (ret) 224 if (ret) {
225 pm_runtime_put(&thdev->dev); 225 pm_runtime_put(&thdev->dev);
226 module_put(thdrv->driver.owner);
227 }
226 228
227 return ret; 229 return ret;
228} 230}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0bba3842336e..590cf90dd21a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), 85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
86 .driver_data = (kernel_ulong_t)0, 86 .driver_data = (kernel_ulong_t)0,
87 }, 87 },
88 {
89 /* Denverton */
90 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
91 .driver_data = (kernel_ulong_t)0,
92 },
93 {
94 /* Gemini Lake */
95 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
96 .driver_data = (kernel_ulong_t)0,
97 },
88 { 0 }, 98 { 0 },
89}; 99};
90 100
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index dfc1c0e37c40..ad31d21da316 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,7 +35,6 @@
35 * warranty of any kind, whether express or implied. 35 * warranty of any kind, whether express or implied.
36 */ 36 */
37 37
38#include <linux/acpi.h>
39#include <linux/device.h> 38#include <linux/device.h>
40#include <linux/gpio/consumer.h> 39#include <linux/gpio/consumer.h>
41#include <linux/i2c.h> 40#include <linux/i2c.h>
@@ -117,6 +116,10 @@ static const struct chip_desc chips[] = {
117 .has_irq = 1, 116 .has_irq = 1,
118 .muxtype = pca954x_isswi, 117 .muxtype = pca954x_isswi,
119 }, 118 },
119 [pca_9546] = {
120 .nchans = 4,
121 .muxtype = pca954x_isswi,
122 },
120 [pca_9547] = { 123 [pca_9547] = {
121 .nchans = 8, 124 .nchans = 8,
122 .enable = 0x8, 125 .enable = 0x8,
@@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = {
134 { "pca9543", pca_9543 }, 137 { "pca9543", pca_9543 },
135 { "pca9544", pca_9544 }, 138 { "pca9544", pca_9544 },
136 { "pca9545", pca_9545 }, 139 { "pca9545", pca_9545 },
137 { "pca9546", pca_9545 }, 140 { "pca9546", pca_9546 },
138 { "pca9547", pca_9547 }, 141 { "pca9547", pca_9547 },
139 { "pca9548", pca_9548 }, 142 { "pca9548", pca_9548 },
140 { } 143 { }
141}; 144};
142MODULE_DEVICE_TABLE(i2c, pca954x_id); 145MODULE_DEVICE_TABLE(i2c, pca954x_id);
143 146
144#ifdef CONFIG_ACPI
145static const struct acpi_device_id pca954x_acpi_ids[] = {
146 { .id = "PCA9540", .driver_data = pca_9540 },
147 { .id = "PCA9542", .driver_data = pca_9542 },
148 { .id = "PCA9543", .driver_data = pca_9543 },
149 { .id = "PCA9544", .driver_data = pca_9544 },
150 { .id = "PCA9545", .driver_data = pca_9545 },
151 { .id = "PCA9546", .driver_data = pca_9545 },
152 { .id = "PCA9547", .driver_data = pca_9547 },
153 { .id = "PCA9548", .driver_data = pca_9548 },
154 { }
155};
156MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
157#endif
158
159#ifdef CONFIG_OF 147#ifdef CONFIG_OF
160static const struct of_device_id pca954x_of_match[] = { 148static const struct of_device_id pca954x_of_match[] = {
161 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, 149 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client,
393 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); 381 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
394 if (match) 382 if (match)
395 data->chip = of_device_get_match_data(&client->dev); 383 data->chip = of_device_get_match_data(&client->dev);
396 else if (id) 384 else
397 data->chip = &chips[id->driver_data]; 385 data->chip = &chips[id->driver_data];
398 else {
399 const struct acpi_device_id *acpi_id;
400
401 acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
402 &client->dev);
403 if (!acpi_id)
404 return -ENODEV;
405 data->chip = &chips[acpi_id->driver_data];
406 }
407 386
408 data->last_chan = 0; /* force the first selection */ 387 data->last_chan = 0; /* force the first selection */
409 388
@@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = {
492 .name = "pca954x", 471 .name = "pca954x",
493 .pm = &pca954x_pm, 472 .pm = &pca954x_pm,
494 .of_match_table = of_match_ptr(pca954x_of_match), 473 .of_match_table = of_match_ptr(pca954x_of_match),
495 .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
496 }, 474 },
497 .probe = pca954x_probe, 475 .probe = pca954x_probe,
498 .remove = pca954x_remove, 476 .remove = pca954x_remove,
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index ca5759c0c318..43a6cb078193 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
370 name = "accel_3d"; 370 name = "accel_3d";
371 channel_spec = accel_3d_channels; 371 channel_spec = accel_3d_channels;
372 channel_size = sizeof(accel_3d_channels); 372 channel_size = sizeof(accel_3d_channels);
373 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
373 } else { 374 } else {
374 name = "gravity"; 375 name = "gravity";
375 channel_spec = gravity_channels; 376 channel_spec = gravity_channels;
376 channel_size = sizeof(gravity_channels); 377 channel_size = sizeof(gravity_channels);
378 indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
377 } 379 }
378 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, 380 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
379 &accel_state->common_attributes); 381 &accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
395 goto error_free_dev_mem; 397 goto error_free_dev_mem;
396 } 398 }
397 399
398 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
399 indio_dev->dev.parent = &pdev->dev; 400 indio_dev->dev.parent = &pdev->dev;
400 indio_dev->info = &accel_3d_info; 401 indio_dev->info = &accel_3d_info;
401 indio_dev->name = name; 402 indio_dev->name = name;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index ad9dec30bb30..4282ceca3d8f 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
169{ 169{
170 struct iio_dev *indio_dev = private; 170 struct iio_dev *indio_dev = private;
171 struct tiadc_device *adc_dev = iio_priv(indio_dev); 171 struct tiadc_device *adc_dev = iio_priv(indio_dev);
172 unsigned int status, config; 172 unsigned int status, config, adc_fsm;
173 unsigned short count = 0;
174
173 status = tiadc_readl(adc_dev, REG_IRQSTATUS); 175 status = tiadc_readl(adc_dev, REG_IRQSTATUS);
174 176
175 /* 177 /*
@@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
183 tiadc_writel(adc_dev, REG_CTRL, config); 185 tiadc_writel(adc_dev, REG_CTRL, config);
184 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN 186 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
185 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); 187 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
188
189 /* wait for idle state.
190 * ADC needs to finish the current conversion
191 * before disabling the module
192 */
193 do {
194 adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
195 } while (adc_fsm != 0x10 && count++ < 100);
196
186 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); 197 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
187 return IRQ_HANDLED; 198 return IRQ_HANDLED;
188 } else if (status & IRQENB_FIFO1THRES) { 199 } else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index d6c372bb433b..c17596f7ed2c 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); 61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
62 if (ret < 0) 62 if (ret < 0)
63 break; 63 break;
64 64 ret = IIO_VAL_INT;
65 *val = data; 65 *val = data;
66 break; 66 break;
67 case IIO_CHAN_INFO_CALIBBIAS: 67 case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) 76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
77 st->core.calib[i] = 77 st->core.calib[i] =
78 st->core.resp->sensor_offset.offset[i]; 78 st->core.resp->sensor_offset.offset[i];
79 79 ret = IIO_VAL_INT;
80 *val = st->core.calib[idx]; 80 *val = st->core.calib[idx];
81 break; 81 break;
82 case IIO_CHAN_INFO_SCALE: 82 case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 7afdac42ed42..01e02b9926d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
379{ 379{
380 380
381 struct hid_sensor_hub_attribute_info timestamp; 381 struct hid_sensor_hub_attribute_info timestamp;
382 s32 value;
383 int ret;
382 384
383 hid_sensor_get_reporting_interval(hsdev, usage_id, st); 385 hid_sensor_get_reporting_interval(hsdev, usage_id, st);
384 386
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
417 st->sensitivity.index, st->sensitivity.report_id, 419 st->sensitivity.index, st->sensitivity.report_id,
418 timestamp.index, timestamp.report_id); 420 timestamp.index, timestamp.report_id);
419 421
422 ret = sensor_hub_get_feature(hsdev,
423 st->power_state.report_id,
424 st->power_state.index, sizeof(value), &value);
425 if (ret < 0)
426 return ret;
427 if (value < 0)
428 return -EINVAL;
429
420 return 0; 430 return 0;
421} 431}
422EXPORT_SYMBOL(hid_sensor_parse_common_attributes); 432EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a38300..ecf592d69043 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
51 st->report_state.report_id, 51 st->report_state.report_id,
52 st->report_state.index, 52 st->report_state.index,
53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); 53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
54
55 poll_value = hid_sensor_read_poll_value(st);
56 } else { 54 } else {
57 int val; 55 int val;
58 56
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
89 sensor_hub_get_feature(st->hsdev, st->power_state.report_id, 87 sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
90 st->power_state.index, 88 st->power_state.index,
91 sizeof(state_val), &state_val); 89 sizeof(state_val), &state_val);
92 if (state && poll_value) 90 if (state)
91 poll_value = hid_sensor_read_poll_value(st);
92 if (poll_value > 0)
93 msleep_interruptible(poll_value * 2); 93 msleep_interruptible(poll_value * 2);
94 94
95 return 0; 95 return 0;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa886f72..821919dd245b 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
27#include <linux/iio/trigger_consumer.h> 27#include <linux/iio/trigger_consumer.h>
28#include <linux/iio/triggered_buffer.h> 28#include <linux/iio/triggered_buffer.h>
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30#include <linux/delay.h>
30#include "bmg160.h" 31#include "bmg160.h"
31 32
32#define BMG160_IRQ_NAME "bmg160_event" 33#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
52#define BMG160_DEF_BW 100 53#define BMG160_DEF_BW 100
53#define BMG160_REG_PMU_BW_RES BIT(7) 54#define BMG160_REG_PMU_BW_RES BIT(7)
54 55
56#define BMG160_GYRO_REG_RESET 0x14
57#define BMG160_GYRO_RESET_VAL 0xb6
58
55#define BMG160_REG_INT_MAP_0 0x17 59#define BMG160_REG_INT_MAP_0 0x17
56#define BMG160_INT_MAP_0_BIT_ANY BIT(1) 60#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
57 61
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
236 int ret; 240 int ret;
237 unsigned int val; 241 unsigned int val;
238 242
243 /*
244 * Reset chip to get it in a known good state. A delay of 30ms after
245 * reset is required according to the datasheet.
246 */
247 regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
248 BMG160_GYRO_RESET_VAL);
249 usleep_range(30000, 30700);
250
239 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); 251 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
240 if (ret < 0) { 252 if (ret < 0) {
241 dev_err(dev, "Error reading reg_chip_id\n"); 253 dev_err(dev, "Error reading reg_chip_id\n");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 78532ce07449..81b572d7699a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
193 if (err < 0) 193 if (err < 0)
194 goto out; 194 goto out;
195 195
196 fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | 196 fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); 197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
198 198
199 wdata = cpu_to_le16(fifo_watermark); 199 wdata = cpu_to_le16(fifo_watermark);
200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, 200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d18ded45bedd..3ff91e02fee3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
612 case IIO_VAL_FRACTIONAL_LOG2: 612 case IIO_VAL_FRACTIONAL_LOG2:
613 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 613 tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
614 tmp1 = do_div(tmp, 1000000000LL); 614 tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
615 tmp0 = tmp; 615 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
616 return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
617 case IIO_VAL_INT_MULTIPLE: 616 case IIO_VAL_INT_MULTIPLE:
618 { 617 {
619 int i; 618 int i;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 6dd8cbd7ce95..e13370dc9b1c 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -763,7 +763,7 @@ power_off:
763 return ret; 763 return ret;
764} 764}
765 765
766static int __exit ak8974_remove(struct i2c_client *i2c) 766static int ak8974_remove(struct i2c_client *i2c)
767{ 767{
768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c); 768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
769 struct ak8974 *ak8974 = iio_priv(indio_dev); 769 struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = {
845 .of_match_table = of_match_ptr(ak8974_of_match), 845 .of_match_table = of_match_ptr(ak8974_of_match),
846 }, 846 },
847 .probe = ak8974_probe, 847 .probe = ak8974_probe,
848 .remove = __exit_p(ak8974_remove), 848 .remove = ak8974_remove,
849 .id_table = ak8974_id, 849 .id_table = ak8974_id,
850}; 850};
851module_i2c_driver(ak8974_driver); 851module_i2c_driver(ak8974_driver);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5f2680855552..fd0edca0e656 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
458 }, 458 },
459 .multi_read_bit = true, 459 .multi_read_bit = true,
460 .bootime = 2,
460 }, 461 },
461}; 462};
462 463
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index e95510117a6d..f2ae75fa3128 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
29{ 29{
30 int i, n, completed = 0; 30 int i, n, completed = 0;
31 31
32 while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { 32 /*
33 * budget might be (-1) if the caller does not
34 * want to bound this call, thus we need unsigned
35 * minimum here.
36 */
37 while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
38 budget - completed), cq->wc)) > 0) {
33 for (i = 0; i < n; i++) { 39 for (i = 0; i < n; i++) {
34 struct ib_wc *wc = &cq->wc[i]; 40 struct ib_wc *wc = &cq->wc[i];
35 41
@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
196 irq_poll_disable(&cq->iop); 202 irq_poll_disable(&cq->iop);
197 break; 203 break;
198 case IB_POLL_WORKQUEUE: 204 case IB_POLL_WORKQUEUE:
199 flush_work(&cq->work); 205 cancel_work_sync(&cq->work);
200 break; 206 break;
201 default: 207 default:
202 WARN_ON_ONCE(1); 208 WARN_ON_ONCE(1);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 593d2ce6ec7c..7c9e34d679d3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
336 struct device *parent = device->dev.parent; 336 struct device *parent = device->dev.parent;
337 337
338 WARN_ON_ONCE(!parent); 338 WARN_ON_ONCE(!parent);
339 if (!device->dev.dma_ops) 339 WARN_ON_ONCE(device->dma_device);
340 device->dev.dma_ops = parent->dma_ops; 340 if (device->dev.dma_ops) {
341 if (!device->dev.dma_mask) 341 /*
342 device->dev.dma_mask = parent->dma_mask; 342 * The caller provided custom DMA operations. Copy the
343 if (!device->dev.coherent_dma_mask) 343 * DMA-related fields that are used by e.g. dma_alloc_coherent()
344 device->dev.coherent_dma_mask = parent->coherent_dma_mask; 344 * into device->dev.
345 */
346 device->dma_device = &device->dev;
347 if (!device->dev.dma_mask)
348 device->dev.dma_mask = parent->dma_mask;
349 if (!device->dev.coherent_dma_mask)
350 device->dev.coherent_dma_mask =
351 parent->coherent_dma_mask;
352 } else {
353 /*
354 * The caller did not provide custom DMA operations. Use the
355 * DMA mapping operations of the parent device.
356 */
357 device->dma_device = parent;
358 }
345 359
346 mutex_lock(&device_mutex); 360 mutex_lock(&device_mutex);
347 361
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
1015 return -ENOMEM; 1029 return -ENOMEM;
1016 1030
1017 ib_comp_wq = alloc_workqueue("ib-comp-wq", 1031 ib_comp_wq = alloc_workqueue("ib-comp-wq",
1018 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1032 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1019 WQ_UNBOUND_MAX_ACTIVE);
1020 if (!ib_comp_wq) { 1033 if (!ib_comp_wq) {
1021 ret = -ENOMEM; 1034 ret = -ENOMEM;
1022 goto err; 1035 goto err;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0f5d43d1f5fc..70c3e9e79508 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
160 return NOTIFY_DONE; 160 return NOTIFY_DONE;
161 161
162 iwdev = &hdl->device; 162 iwdev = &hdl->device;
163 if (iwdev->init_state < INET_NOTIFIER)
164 return NOTIFY_DONE;
165
163 netdev = iwdev->ldev->netdev; 166 netdev = iwdev->ldev->netdev;
164 upper_dev = netdev_master_upper_dev_get(netdev); 167 upper_dev = netdev_master_upper_dev_get(netdev);
165 if (netdev != event_netdev) 168 if (netdev != event_netdev)
@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
214 return NOTIFY_DONE; 217 return NOTIFY_DONE;
215 218
216 iwdev = &hdl->device; 219 iwdev = &hdl->device;
220 if (iwdev->init_state < INET_NOTIFIER)
221 return NOTIFY_DONE;
222
217 netdev = iwdev->ldev->netdev; 223 netdev = iwdev->ldev->netdev;
218 if (netdev != event_netdev) 224 if (netdev != event_netdev)
219 return NOTIFY_DONE; 225 return NOTIFY_DONE;
@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
260 if (!iwhdl) 266 if (!iwhdl)
261 return NOTIFY_DONE; 267 return NOTIFY_DONE;
262 iwdev = &iwhdl->device; 268 iwdev = &iwhdl->device;
269 if (iwdev->init_state < INET_NOTIFIER)
270 return NOTIFY_DONE;
263 p = (__be32 *)neigh->primary_key; 271 p = (__be32 *)neigh->primary_key;
264 i40iw_copy_ip_ntohl(local_ipaddr, p); 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
265 if (neigh->nud_state & NUD_VALID) { 273 if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc9fb144e57b..c52edeafd616 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
372 return 0; 372 return 0;
373} 373}
374 374
375static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, 375static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
376 bool dpp_pool) 376 bool dpp_pool)
377{ 377{
378 int status; 378 int status;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 12c4208fd701..af9f596bb68b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7068 unsigned long flags; 7068 unsigned long flags;
7069 7069
7070 while (wait) { 7070 while (wait) {
7071 unsigned long shadow; 7071 unsigned long shadow = 0;
7072 int cstart, previ = -1; 7072 int cstart, previ = -1;
7073 7073
7074 /* 7074 /*
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 3cd96c1b9502..9fbe22d3467b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -69,6 +69,9 @@
69 */ 69 */
70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
71 71
72#define PVRDMA_NUM_RING_PAGES 4
73#define PVRDMA_QP_NUM_HEADER_PAGES 1
74
72struct pvrdma_dev; 75struct pvrdma_dev;
73 76
74struct pvrdma_page_dir { 77struct pvrdma_page_dir {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index e69d6f3cae32..09078ccfaec7 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {
132 132
133enum pvrdma_device_ctl { 133enum pvrdma_device_ctl {
134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ 134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
135 PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ 135 PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ 136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
137}; 137};
138 138
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 100bea5c42ff..34ebc7615411 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -56,7 +56,7 @@
56#include "pvrdma.h" 56#include "pvrdma.h"
57 57
58#define DRV_NAME "vmw_pvrdma" 58#define DRV_NAME "vmw_pvrdma"
59#define DRV_VERSION "1.0.0.0-k" 59#define DRV_VERSION "1.0.1.0-k"
60 60
61static DEFINE_MUTEX(pvrdma_device_list_lock); 61static DEFINE_MUTEX(pvrdma_device_list_lock);
62static LIST_HEAD(pvrdma_device_list); 62static LIST_HEAD(pvrdma_device_list);
@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
661 break; 661 break;
662 case NETDEV_UP: 662 case NETDEV_UP:
663 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 663 pvrdma_write_reg(dev, PVRDMA_REG_CTL,
664 PVRDMA_DEVICE_CTL_UNQUIESCE);
665
666 mb();
667
668 if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
669 dev_err(&dev->pdev->dev,
670 "failed to activate device during link up\n");
671 else
672 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
664 break; 673 break;
665 default: 674 default:
666 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", 675 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
858 dev->dsr->resp_slot_dma = (u64)slot_dma; 867 dev->dsr->resp_slot_dma = (u64)slot_dma;
859 868
860 /* Async event ring */ 869 /* Async event ring */
861 dev->dsr->async_ring_pages.num_pages = 4; 870 dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
862 ret = pvrdma_page_dir_init(dev, &dev->async_pdir, 871 ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
863 dev->dsr->async_ring_pages.num_pages, true); 872 dev->dsr->async_ring_pages.num_pages, true);
864 if (ret) 873 if (ret)
@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
867 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; 876 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
868 877
869 /* CQ notification ring */ 878 /* CQ notification ring */
870 dev->dsr->cq_ring_pages.num_pages = 4; 879 dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
871 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, 880 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
872 dev->dsr->cq_ring_pages.num_pages, true); 881 dev->dsr->cq_ring_pages.num_pages, true);
873 if (ret) 882 if (ret)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dbbfd35e7da7..30062aad3af1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
170 sizeof(struct pvrdma_sge) * 170 sizeof(struct pvrdma_sge) *
171 qp->sq.max_sg); 171 qp->sq.max_sg);
172 /* Note: one extra page for the header. */ 172 /* Note: one extra page for the header. */
173 qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + 173 qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
174 PAGE_SIZE - 1) / PAGE_SIZE; 174 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
175 PAGE_SIZE;
175 176
176 return 0; 177 return 0;
177} 178}
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
288 qp->npages = qp->npages_send + qp->npages_recv; 289 qp->npages = qp->npages_send + qp->npages_recv;
289 290
290 /* Skip header page. */ 291 /* Skip header page. */
291 qp->sq.offset = PAGE_SIZE; 292 qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
292 293
293 /* Recv queue pages are after send pages. */ 294 /* Recv queue pages are after send pages. */
294 qp->rq.offset = qp->npages_send * PAGE_SIZE; 295 qp->rq.offset = qp->npages_send * PAGE_SIZE;
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
341 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); 342 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
342 cmd->access_flags = IB_ACCESS_LOCAL_WRITE; 343 cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
343 cmd->total_chunks = qp->npages; 344 cmd->total_chunks = qp->npages;
344 cmd->send_chunks = qp->npages_send - 1; 345 cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
345 cmd->pdir_dma = qp->pdir.dir_dma; 346 cmd->pdir_dma = qp->pdir.dir_dma;
346 347
347 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", 348 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
@@ -554,13 +555,13 @@ out:
554 return ret; 555 return ret;
555} 556}
556 557
557static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) 558static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
558{ 559{
559 return pvrdma_page_dir_get_ptr(&qp->pdir, 560 return pvrdma_page_dir_get_ptr(&qp->pdir,
560 qp->sq.offset + n * qp->sq.wqe_size); 561 qp->sq.offset + n * qp->sq.wqe_size);
561} 562}
562 563
563static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) 564static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
564{ 565{
565 return pvrdma_page_dir_get_ptr(&qp->pdir, 566 return pvrdma_page_dir_get_ptr(&qp->pdir,
566 qp->rq.offset + n * qp->rq.wqe_size); 567 qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
598 unsigned long flags; 599 unsigned long flags;
599 struct pvrdma_sq_wqe_hdr *wqe_hdr; 600 struct pvrdma_sq_wqe_hdr *wqe_hdr;
600 struct pvrdma_sge *sge; 601 struct pvrdma_sge *sge;
601 int i, index; 602 int i, ret;
602 int nreq;
603 int ret;
604 603
605 /* 604 /*
606 * In states lower than RTS, we can fail immediately. In other states, 605 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
613 612
614 spin_lock_irqsave(&qp->sq.lock, flags); 613 spin_lock_irqsave(&qp->sq.lock, flags);
615 614
616 index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); 615 while (wr) {
617 for (nreq = 0; wr; nreq++, wr = wr->next) { 616 unsigned int tail = 0;
618 unsigned int tail;
619 617
620 if (unlikely(!pvrdma_idx_ring_has_space( 618 if (unlikely(!pvrdma_idx_ring_has_space(
621 qp->sq.ring, qp->sq.wqe_cnt, &tail))) { 619 qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
680 } 678 }
681 } 679 }
682 680
683 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); 681 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
684 memset(wqe_hdr, 0, sizeof(*wqe_hdr)); 682 memset(wqe_hdr, 0, sizeof(*wqe_hdr));
685 wqe_hdr->wr_id = wr->wr_id; 683 wqe_hdr->wr_id = wr->wr_id;
686 wqe_hdr->num_sge = wr->num_sge; 684 wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
771 /* Make sure wqe is written before index update */ 769 /* Make sure wqe is written before index update */
772 smp_wmb(); 770 smp_wmb();
773 771
774 index++;
775 if (unlikely(index >= qp->sq.wqe_cnt))
776 index = 0;
777 /* Update shared sq ring */ 772 /* Update shared sq ring */
778 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, 773 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
779 qp->sq.wqe_cnt); 774 qp->sq.wqe_cnt);
775
776 wr = wr->next;
780 } 777 }
781 778
782 ret = 0; 779 ret = 0;
@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
806 struct pvrdma_qp *qp = to_vqp(ibqp); 803 struct pvrdma_qp *qp = to_vqp(ibqp);
807 struct pvrdma_rq_wqe_hdr *wqe_hdr; 804 struct pvrdma_rq_wqe_hdr *wqe_hdr;
808 struct pvrdma_sge *sge; 805 struct pvrdma_sge *sge;
809 int index, nreq;
810 int ret = 0; 806 int ret = 0;
811 int i; 807 int i;
812 808
@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
821 817
822 spin_lock_irqsave(&qp->rq.lock, flags); 818 spin_lock_irqsave(&qp->rq.lock, flags);
823 819
824 index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); 820 while (wr) {
825 for (nreq = 0; wr; nreq++, wr = wr->next) { 821 unsigned int tail = 0;
826 unsigned int tail;
827 822
828 if (unlikely(wr->num_sge > qp->rq.max_sg || 823 if (unlikely(wr->num_sge > qp->rq.max_sg ||
829 wr->num_sge < 0)) { 824 wr->num_sge < 0)) {
@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
843 goto out; 838 goto out;
844 } 839 }
845 840
846 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); 841 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
847 wqe_hdr->wr_id = wr->wr_id; 842 wqe_hdr->wr_id = wr->wr_id;
848 wqe_hdr->num_sge = wr->num_sge; 843 wqe_hdr->num_sge = wr->num_sge;
849 wqe_hdr->total_len = 0; 844 wqe_hdr->total_len = 0;
@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
859 /* Make sure wqe is written before index update */ 854 /* Make sure wqe is written before index update */
860 smp_wmb(); 855 smp_wmb();
861 856
862 index++;
863 if (unlikely(index >= qp->rq.wqe_cnt))
864 index = 0;
865 /* Update shared rq ring */ 857 /* Update shared rq ring */
866 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, 858 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
867 qp->rq.wqe_cnt); 859 qp->rq.wqe_cnt);
860
861 wr = wr->next;
868 } 862 }
869 863
870 spin_unlock_irqrestore(&qp->rq.lock, flags); 864 spin_unlock_irqrestore(&qp->rq.lock, flags);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b8142759..6b712eecbd37 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
170 170
171 spin_lock_irq(&rdi->mmap_offset_lock); 171 spin_lock_irq(&rdi->mmap_offset_lock);
172 if (rdi->mmap_offset == 0) 172 if (rdi->mmap_offset == 0)
173 rdi->mmap_offset = PAGE_SIZE; 173 rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
174 ip->offset = rdi->mmap_offset; 174 ip->offset = rdi->mmap_offset;
175 rdi->mmap_offset += size; 175 rdi->mmap_offset += ALIGN(size, SHMLBA);
176 spin_unlock_irq(&rdi->mmap_offset_lock); 176 spin_unlock_irq(&rdi->mmap_offset_lock);
177 177
178 INIT_LIST_HEAD(&ip->pending_mmaps); 178 INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 7d1ac27ed251..6332dedc11e8 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -22,4 +22,4 @@ config RDMA_RXE
22 To configure and work with soft-RoCE driver please use the 22 To configure and work with soft-RoCE driver please use the
23 following wiki page under "configure Soft-RoCE (RXE)" section: 23 following wiki page under "configure Soft-RoCE (RXE)" section:
24 24
25 https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home 25 https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c09359..bd812e00988e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
156 spin_lock_bh(&rxe->mmap_offset_lock); 156 spin_lock_bh(&rxe->mmap_offset_lock);
157 157
158 if (rxe->mmap_offset == 0) 158 if (rxe->mmap_offset == 0)
159 rxe->mmap_offset = PAGE_SIZE; 159 rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
160 160
161 ip->info.offset = rxe->mmap_offset; 161 ip->info.offset = rxe->mmap_offset;
162 rxe->mmap_offset += size; 162 rxe->mmap_offset += ALIGN(size, SHMLBA);
163 163
164 spin_unlock_bh(&rxe->mmap_offset_lock); 164 spin_unlock_bh(&rxe->mmap_offset_lock);
165 165
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index dbfde0dc6ff7..9f95f50b2909 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -729,11 +729,11 @@ next_wqe:
729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); 729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
730 if (ret) { 730 if (ret) {
731 qp->need_req_skb = 1; 731 qp->need_req_skb = 1;
732 kfree_skb(skb);
733 732
734 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); 733 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
735 734
736 if (ret == -EAGAIN) { 735 if (ret == -EAGAIN) {
736 kfree_skb(skb);
737 rxe_run_task(&qp->req.task, 1); 737 rxe_run_task(&qp->req.task, 1);
738 goto exit; 738 goto exit;
739 } 739 }
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index d404a8aba7af..c9dd385ce62e 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
813 WARN_ON_ONCE(1); 813 WARN_ON_ONCE(1);
814 } 814 }
815 815
816 /* We successfully processed this new request. */
817 qp->resp.msn++;
818
819 /* next expected psn, read handles this separately */ 816 /* next expected psn, read handles this separately */
820 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; 817 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
821 818
822 qp->resp.opcode = pkt->opcode; 819 qp->resp.opcode = pkt->opcode;
823 qp->resp.status = IB_WC_SUCCESS; 820 qp->resp.status = IB_WC_SUCCESS;
824 821
825 if (pkt->mask & RXE_COMP_MASK) 822 if (pkt->mask & RXE_COMP_MASK) {
823 /* We successfully processed this new request. */
824 qp->resp.msn++;
826 return RESPST_COMPLETE; 825 return RESPST_COMPLETE;
827 else if (qp_type(qp) == IB_QPT_RC) 826 } else if (qp_type(qp) == IB_QPT_RC)
828 return RESPST_ACKNOWLEDGE; 827 return RESPST_ACKNOWLEDGE;
829 else 828 else
830 return RESPST_CLEANUP; 829 return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d0b22ad58c1..c1ae4aeae2f9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@ struct iser_fr_desc {
430 struct list_head list; 430 struct list_head list;
431 struct iser_reg_resources rsc; 431 struct iser_reg_resources rsc;
432 struct iser_pi_context *pi_ctx; 432 struct iser_pi_context *pi_ctx;
433 struct list_head all_list;
433}; 434};
434 435
435/** 436/**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
443 struct list_head list; 444 struct list_head list;
444 spinlock_t lock; 445 spinlock_t lock;
445 int size; 446 int size;
447 struct list_head all_list;
446}; 448};
447 449
448/** 450/**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 30b622f2ab73..c538a38c91ce 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
362 int i, ret; 362 int i, ret;
363 363
364 INIT_LIST_HEAD(&fr_pool->list); 364 INIT_LIST_HEAD(&fr_pool->list);
365 INIT_LIST_HEAD(&fr_pool->all_list);
365 spin_lock_init(&fr_pool->lock); 366 spin_lock_init(&fr_pool->lock);
366 fr_pool->size = 0; 367 fr_pool->size = 0;
367 for (i = 0; i < cmds_max; i++) { 368 for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
373 } 374 }
374 375
375 list_add_tail(&desc->list, &fr_pool->list); 376 list_add_tail(&desc->list, &fr_pool->list);
377 list_add_tail(&desc->all_list, &fr_pool->all_list);
376 fr_pool->size++; 378 fr_pool->size++;
377 } 379 }
378 380
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
392 struct iser_fr_desc *desc, *tmp; 394 struct iser_fr_desc *desc, *tmp;
393 int i = 0; 395 int i = 0;
394 396
395 if (list_empty(&fr_pool->list)) 397 if (list_empty(&fr_pool->all_list))
396 return; 398 return;
397 399
398 iser_info("freeing conn %p fr pool\n", ib_conn); 400 iser_info("freeing conn %p fr pool\n", ib_conn);
399 401
400 list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { 402 list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
401 list_del(&desc->list); 403 list_del(&desc->all_list);
402 iser_free_reg_res(&desc->rsc); 404 iser_free_reg_res(&desc->rsc);
403 if (desc->pi_ctx) 405 if (desc->pi_ctx)
404 iser_free_pi_ctx(desc->pi_ctx); 406 iser_free_pi_ctx(desc->pi_ctx);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 91cbe86b25c8..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
817 rx_wr->sg_list = &rx_desc->rx_sg; 817 rx_wr->sg_list = &rx_desc->rx_sg;
818 rx_wr->num_sge = 1; 818 rx_wr->num_sge = 1;
819 rx_wr->next = rx_wr + 1; 819 rx_wr->next = rx_wr + 1;
820 rx_desc->in_use = false;
820 } 821 }
821 rx_wr--; 822 rx_wr--;
822 rx_wr->next = NULL; /* mark end of work requests list */ 823 rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
835 struct ib_recv_wr *rx_wr_failed, rx_wr; 836 struct ib_recv_wr *rx_wr_failed, rx_wr;
836 int ret; 837 int ret;
837 838
839 if (!rx_desc->in_use) {
840 /*
841 * if the descriptor is not in-use we already reposted it
842 * for recv, so just silently return
843 */
844 return 0;
845 }
846
847 rx_desc->in_use = false;
838 rx_wr.wr_cqe = &rx_desc->rx_cqe; 848 rx_wr.wr_cqe = &rx_desc->rx_cqe;
839 rx_wr.sg_list = &rx_desc->rx_sg; 849 rx_wr.sg_list = &rx_desc->rx_sg;
840 rx_wr.num_sge = 1; 850 rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1397 return; 1407 return;
1398 } 1408 }
1399 1409
1410 rx_desc->in_use = true;
1411
1400 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1412 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1401 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1413 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1402 1414
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1659 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); 1671 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1660 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1672 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1661 1673
1662 if (ret) 1674 if (ret) {
1663 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1675 /*
1664 else 1676 * transport_generic_request_failure() expects to have
1665 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1677 * plus two references to handle queue-full, so re-add
1678 * one here as target-core will have already dropped
1679 * it after the first isert_put_datain() callback.
1680 */
1681 kref_get(&cmd->cmd_kref);
1682 transport_generic_request_failure(cmd, cmd->pi_err);
1683 } else {
1684 /*
1685 * XXX: isert_put_response() failure is not retried.
1686 */
1687 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1688 if (ret)
1689 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1690 }
1666} 1691}
1667 1692
1668static void 1693static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1699 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1724 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1700 spin_unlock_bh(&cmd->istate_lock); 1725 spin_unlock_bh(&cmd->istate_lock);
1701 1726
1702 if (ret) { 1727 /*
1703 target_put_sess_cmd(se_cmd); 1728 * transport_generic_request_failure() will drop the extra
1704 transport_send_check_condition_and_sense(se_cmd, 1729 * se_cmd->cmd_kref reference after T10-PI error, and handle
1705 se_cmd->pi_err, 0); 1730 * any non-zero ->queue_status() callback error retries.
1706 } else { 1731 */
1732 if (ret)
1733 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1734 else
1707 target_execute_cmd(se_cmd); 1735 target_execute_cmd(se_cmd);
1708 }
1709} 1736}
1710 1737
1711static void 1738static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2171 chain_wr = &isert_cmd->tx_desc.send_wr; 2198 chain_wr = &isert_cmd->tx_desc.send_wr;
2172 } 2199 }
2173 2200
2174 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2201 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2175 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); 2202 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2176 return 1; 2203 isert_cmd, rc);
2204 return rc;
2177} 2205}
2178 2206
2179static int 2207static int
2180isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2208isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2181{ 2209{
2182 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2210 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2211 int ret;
2183 2212
2184 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2213 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2185 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2214 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
2186 2215
2187 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2216 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2188 isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2217 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2189 &isert_cmd->tx_desc.tx_cqe, NULL); 2218 &isert_cmd->tx_desc.tx_cqe, NULL);
2190 2219
2191 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2220 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
2192 isert_cmd); 2221 isert_cmd, ret);
2193 return 0; 2222 return ret;
2194} 2223}
2195 2224
2196static int 2225static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
60 60
61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ 61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ 62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
63 sizeof(struct ib_cqe))) 63 sizeof(struct ib_cqe) + sizeof(bool)))
64 64
65#define ISCSI_ISER_SG_TABLESIZE 256 65#define ISCSI_ISER_SG_TABLESIZE 256
66 66
@@ -85,6 +85,7 @@ struct iser_rx_desc {
85 u64 dma_addr; 85 u64 dma_addr;
86 struct ib_sge rx_sg; 86 struct ib_sge rx_sg;
87 struct ib_cqe rx_cqe; 87 struct ib_cqe rx_cqe;
88 bool in_use;
88 char pad[ISER_RX_PAD_SIZE]; 89 char pad[ISER_RX_PAD_SIZE];
89} __packed; 90} __packed;
90 91
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27dfcdc..db64adfbe1af 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
141 141
142 interface = intf->cur_altsetting; 142 interface = intf->cur_altsetting;
143 143
144 if (interface->desc.bNumEndpoints < 2)
145 return -ENODEV;
146
144 epirq = &interface->endpoint[0].desc; 147 epirq = &interface->endpoint[0].desc;
145 epout = &interface->endpoint[1].desc; 148 epout = &interface->endpoint[1].desc;
146 149
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 155fcb3b6230..153b1ee13e03 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -202,6 +202,7 @@ static const struct xpad_device {
202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, 204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
205 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
205 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, 206 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
206 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 207 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
207 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, 208 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
326 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 327 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
327 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ 328 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
328 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ 329 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
330 XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
329 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ 331 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
330 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ 332 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
331 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ 333 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d057c302..23c191a2a071 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
700 int error = -ENOMEM; 700 int error = -ENOMEM;
701 701
702 interface = intf->cur_altsetting; 702 interface = intf->cur_altsetting;
703
704 if (interface->desc.bNumEndpoints < 1)
705 return -ENODEV;
706
703 endpoint = &interface->endpoint[0].desc; 707 endpoint = &interface->endpoint[0].desc;
704 708
705 if (!usb_endpoint_is_int_in(endpoint)) 709 if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36913b4..f4e8fbec6a94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
1667 return -EINVAL; 1667 return -EINVAL;
1668 1668
1669 alt = pcu->ctrl_intf->cur_altsetting; 1669 alt = pcu->ctrl_intf->cur_altsetting;
1670
1671 if (alt->desc.bNumEndpoints < 1)
1672 return -ENODEV;
1673
1670 pcu->ep_ctrl = &alt->endpoint[0].desc; 1674 pcu->ep_ctrl = &alt->endpoint[0].desc;
1671 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); 1675 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
1672 1676
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c075f1..6e7ff9561d92 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
875 int ret, pipe, i; 875 int ret, pipe, i;
876 876
877 interface = intf->cur_altsetting; 877 interface = intf->cur_altsetting;
878
879 if (interface->desc.bNumEndpoints < 1)
880 return -ENODEV;
881
878 endpoint = &interface->endpoint[0].desc; 882 endpoint = &interface->endpoint[0].desc;
879 if (!usb_endpoint_is_int_in(endpoint)) 883 if (!usb_endpoint_is_int_in(endpoint))
880 return -ENODEV; 884 return -ENODEV;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 72b28ebfe360..f210e19ddba6 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
1282 /* handle buttons */ 1282 /* handle buttons */
1283 if (pkt_id == SS4_PACKET_ID_STICK) { 1283 if (pkt_id == SS4_PACKET_ID_STICK) {
1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01); 1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
1285 if (!(priv->flags & ALPS_BUTTONPAD)) { 1285 f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
1286 f->ts_right = !!(SS4_BTN_V2(p) & 0x02); 1286 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1287 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1288 }
1289 } else { 1287 } else {
1290 f->left = !!(SS4_BTN_V2(p) & 0x01); 1288 f->left = !!(SS4_BTN_V2(p) & 0x01);
1291 if (!(priv->flags & ALPS_BUTTONPAD)) { 1289 if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
2462 int num_y_electrode; 2460 int num_y_electrode;
2463 int x_pitch, y_pitch, x_phys, y_phys; 2461 int x_pitch, y_pitch, x_phys, y_phys;
2464 2462
2465 num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); 2463 if (IS_SS4PLUS_DEV(priv->dev_id)) {
2466 num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); 2464 num_x_electrode =
2465 SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
2466 num_y_electrode =
2467 SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
2468
2469 priv->x_max =
2470 (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2471 priv->y_max =
2472 (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2467 2473
2468 priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2474 x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2469 priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2475 y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2470 2476
2471 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; 2477 } else {
2472 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; 2478 num_x_electrode =
2479 SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
2480 num_y_electrode =
2481 SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
2482
2483 priv->x_max =
2484 (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2485 priv->y_max =
2486 (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2487
2488 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
2489 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
2490 }
2473 2491
2474 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ 2492 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
2475 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ 2493 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2485{ 2503{
2486 unsigned char is_btnless; 2504 unsigned char is_btnless;
2487 2505
2488 is_btnless = (otp[1][1] >> 3) & 0x01; 2506 if (IS_SS4PLUS_DEV(priv->dev_id))
2507 is_btnless = (otp[1][0] >> 1) & 0x01;
2508 else
2509 is_btnless = (otp[1][1] >> 3) & 0x01;
2489 2510
2490 if (is_btnless) 2511 if (is_btnless)
2491 priv->flags |= ALPS_BUTTONPAD; 2512 priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2493 return 0; 2514 return 0;
2494} 2515}
2495 2516
2517static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
2518 struct alps_data *priv)
2519{
2520 bool is_dual = false;
2521
2522 if (IS_SS4PLUS_DEV(priv->dev_id))
2523 is_dual = (otp[0][0] >> 4) & 0x01;
2524
2525 if (is_dual)
2526 priv->flags |= ALPS_DUALPOINT |
2527 ALPS_DUALPOINT_WITH_PRESSURE;
2528
2529 return 0;
2530}
2531
2496static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, 2532static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2497 struct alps_data *priv) 2533 struct alps_data *priv)
2498{ 2534{
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2508 2544
2509 alps_update_btn_info_ss4_v2(otp, priv); 2545 alps_update_btn_info_ss4_v2(otp, priv);
2510 2546
2547 alps_update_dual_info_ss4_v2(otp, priv);
2548
2511 return 0; 2549 return 0;
2512} 2550}
2513 2551
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
2753 if (alps_set_defaults_ss4_v2(psmouse, priv)) 2791 if (alps_set_defaults_ss4_v2(psmouse, priv))
2754 return -EIO; 2792 return -EIO;
2755 2793
2756 if (priv->fw_ver[1] == 0x1)
2757 priv->flags |= ALPS_DUALPOINT |
2758 ALPS_DUALPOINT_WITH_PRESSURE;
2759
2760 break; 2794 break;
2761 } 2795 }
2762 2796
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2827 ec[2] >= 0x90 && ec[2] <= 0x9d) { 2861 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2828 protocol = &alps_v3_protocol_data; 2862 protocol = &alps_v3_protocol_data;
2829 } else if (e7[0] == 0x73 && e7[1] == 0x03 && 2863 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2830 e7[2] == 0x14 && ec[1] == 0x02) { 2864 (e7[2] == 0x14 || e7[2] == 0x28)) {
2831 protocol = &alps_v8_protocol_data;
2832 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2833 e7[2] == 0x28 && ec[1] == 0x01) {
2834 protocol = &alps_v8_protocol_data; 2865 protocol = &alps_v8_protocol_data;
2835 } else { 2866 } else {
2836 psmouse_dbg(psmouse, 2867 psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2840 } 2871 }
2841 2872
2842 if (priv) { 2873 if (priv) {
2843 /* Save the Firmware version */ 2874 /* Save Device ID and Firmware version */
2875 memcpy(priv->dev_id, e7, 3);
2844 memcpy(priv->fw_ver, ec, 3); 2876 memcpy(priv->fw_ver, ec, 3);
2845 error = alps_set_protocol(psmouse, priv, protocol); 2877 error = alps_set_protocol(psmouse, priv, protocol);
2846 if (error) 2878 if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 6d279aa27cb9..4334f2805d93 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
54 54
55#define SS4_MASK_NORMAL_BUTTONS 0x07 55#define SS4_MASK_NORMAL_BUTTONS 0x07
56 56
57#define SS4PLUS_COUNT_PER_ELECTRODE 128
58#define SS4PLUS_NUMSENSOR_XOFFSET 16
59#define SS4PLUS_NUMSENSOR_YOFFSET 5
60#define SS4PLUS_MIN_PITCH_MM 37
61
62#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \
63 ((_b[1]) == 0x03) && \
64 ((_b[2]) == 0x28) \
65 )
66
57#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ 67#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
58 ((_b[1]) == 0x10) && \ 68 ((_b[1]) == 0x10) && \
59 ((_b[2]) == 0x00) && \ 69 ((_b[2]) == 0x00) && \
@@ -283,6 +293,7 @@ struct alps_data {
283 int addr_command; 293 int addr_command;
284 u16 proto_version; 294 u16 proto_version;
285 u8 byte0, mask0; 295 u8 byte0, mask0;
296 u8 dev_id[3];
286 u8 fw_ver[3]; 297 u8 fw_ver[3];
287 int flags; 298 int flags;
288 int x_max; 299 int x_max;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 352050e9031d..d5ab9ddef3e3 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
218 218
219static int elan_check_ASUS_special_fw(struct elan_tp_data *data) 219static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
220{ 220{
221 if (data->ic_type != 0x0E) 221 if (data->ic_type == 0x0E) {
222 return false; 222 switch (data->product_id) {
223 223 case 0x05 ... 0x07:
224 switch (data->product_id) { 224 case 0x09:
225 case 0x05 ... 0x07: 225 case 0x13:
226 case 0x09: 226 return true;
227 case 0x13: 227 }
228 } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
229 /* ASUS EeeBook X205TA */
228 return true; 230 return true;
229 default:
230 return false;
231 } 231 }
232
233 return false;
232} 234}
233 235
234static int __elan_initialize(struct elan_tp_data *data) 236static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index efc8ec342351..e73d968023f7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad
1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1122 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1123 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1524,6 +1525,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1524 }, 1525 },
1525 }, 1526 },
1526 { 1527 {
1528 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
1532 },
1533 },
1534 {
1527 /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ 1535 /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
1528 .matches = { 1536 .matches = {
1529 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1537 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 198678613382..34dfee555b20 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
170 rmi_get_platform_data(fn->rmi_dev); 170 rmi_get_platform_data(fn->rmi_dev);
171 int error; 171 int error;
172 172
173 /* can happen if f30_data.disable is set */
174 if (!f30)
175 return 0;
176
173 if (pdata->f30_data.trackstick_buttons) { 177 if (pdata->f30_data.trackstick_buttons) {
174 /* Try [re-]establish link to F03. */ 178 /* Try [re-]establish link to F03. */
175 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); 179 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 05afd16ea9c9..312bd6ca9198 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
120 }, 120 },
121 }, 121 },
122 { 122 {
123 /* Dell Embedded Box PC 3000 */
124 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
126 DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
127 },
128 },
129 {
123 /* OQO Model 01 */ 130 /* OQO Model 01 */
124 .matches = { 131 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "OQO"), 132 DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
513 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), 520 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
514 }, 521 },
515 }, 522 },
523 {
524 /* TUXEDO BU1406 */
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
528 },
529 },
516 { } 530 { }
517}; 531};
518 532
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd852059b99e..df4bea96d7ed 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
340 int error; 340 int error;
341 int i; 341 int i;
342 342
343 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
344 return -ENODEV;
345
343 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); 346 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
344 input_dev = input_allocate_device(); 347 input_dev = input_allocate_device();
345 if (!hanwang || !input_dev) { 348 if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e8afbc..4d9d64908b59 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
122 struct input_dev *input_dev; 122 struct input_dev *input_dev;
123 int error = -ENOMEM; 123 int error = -ENOMEM;
124 124
125 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
126 return -ENODEV;
127
125 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); 128 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
126 input_dev = input_allocate_device(); 129 input_dev = input_allocate_device();
127 if (!kbtab || !input_dev) 130 if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e11f88a..4c0eecae065c 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
527 if (iface_desc->desc.bInterfaceClass != 0xFF) 527 if (iface_desc->desc.bInterfaceClass != 0xFF)
528 return -ENODEV; 528 return -ENODEV;
529 529
530 if (iface_desc->desc.bNumEndpoints < 5)
531 return -ENODEV;
532
530 /* Use endpoint #4 (0x86). */ 533 /* Use endpoint #4 (0x86). */
531 endpoint = &iface_desc->endpoint[4].desc; 534 endpoint = &iface_desc->endpoint[4].desc;
532 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) 535 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98940d1392cb..b17536d6e69b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3202 3202
3203 region = iommu_alloc_resv_region(MSI_RANGE_START, 3203 region = iommu_alloc_resv_region(MSI_RANGE_START,
3204 MSI_RANGE_END - MSI_RANGE_START + 1, 3204 MSI_RANGE_END - MSI_RANGE_START + 1,
3205 0, IOMMU_RESV_RESERVED); 3205 0, IOMMU_RESV_MSI);
3206 if (!region) 3206 if (!region)
3207 return; 3207 return;
3208 list_add_tail(&region->list, head); 3208 list_add_tail(&region->list, head);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5806a6acc94e..591bb96047c9 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1889 1889
1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1891 prot, IOMMU_RESV_MSI); 1891 prot, IOMMU_RESV_SW_MSI);
1892 if (!region) 1892 if (!region)
1893 return; 1893 return;
1894 1894
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index abf6496843a6..b493c99e17f7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1609 1609
1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1611 prot, IOMMU_RESV_MSI); 1611 prot, IOMMU_RESV_SW_MSI);
1612 if (!region) 1612 if (!region)
1613 return; 1613 return;
1614 1614
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index a7e0821c9967..c01bfcdb2383 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
512 spin_lock_irqsave(&data->lock, flags); 512 spin_lock_irqsave(&data->lock, flags);
513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { 513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
514 clk_enable(data->clk_master); 514 clk_enable(data->clk_master);
515 __sysmmu_tlb_invalidate_entry(data, iova, 1); 515 if (sysmmu_block(data)) {
516 if (data->version >= MAKE_MMU_VER(5, 0))
517 __sysmmu_tlb_invalidate(data);
518 else
519 __sysmmu_tlb_invalidate_entry(data, iova, 1);
520 sysmmu_unblock(data);
521 }
516 clk_disable(data->clk_master); 522 clk_disable(data->clk_master);
517 } 523 }
518 spin_unlock_irqrestore(&data->lock, flags); 524 spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 238ad3447712..d412a313a372 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
916 * which we used for the IOMMU lookup. Strictly speaking 916 * which we used for the IOMMU lookup. Strictly speaking
917 * we could do this for all PCI devices; we only need to 917 * we could do this for all PCI devices; we only need to
918 * get the BDF# from the scope table for ACPI matches. */ 918 * get the BDF# from the scope table for ACPI matches. */
919 if (pdev->is_virtfn) 919 if (pdev && pdev->is_virtfn)
920 goto got_pdev; 920 goto got_pdev;
921 921
922 *bus = drhd->devices[i].bus; 922 *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
5249 5249
5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, 5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, 5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5252 0, IOMMU_RESV_RESERVED); 5252 0, IOMMU_RESV_MSI);
5253 if (!reg) 5253 if (!reg)
5254 return; 5254 return;
5255 list_add_tail(&reg->list, head); 5255 list_add_tail(&reg->list, head);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1c049e2e12bf..8d6ca28c3e1f 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
422 pte |= ARM_V7S_ATTR_NS_TABLE; 422 pte |= ARM_V7S_ATTR_NS_TABLE;
423 423
424 __arm_v7s_set_pte(ptep, pte, 1, cfg); 424 __arm_v7s_set_pte(ptep, pte, 1, cfg);
425 } else { 425 } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
426 cptep = iopte_deref(pte, lvl); 426 cptep = iopte_deref(pte, lvl);
427 } else {
428 /* We require an unmap first */
429 WARN_ON(!selftest_running);
430 return -EEXIST;
427 } 431 }
428 432
429 /* Rinse, repeat */ 433 /* Rinse, repeat */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index feacc54bec68..f9bc6ebb8140 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
336 pte |= ARM_LPAE_PTE_NSTABLE; 336 pte |= ARM_LPAE_PTE_NSTABLE;
337 __arm_lpae_set_pte(ptep, pte, cfg); 337 __arm_lpae_set_pte(ptep, pte, cfg);
338 } else { 338 } else if (!iopte_leaf(pte, lvl)) {
339 cptep = iopte_deref(pte, data); 339 cptep = iopte_deref(pte, data);
340 } else {
341 /* We require an unmap first */
342 WARN_ON(!selftest_running);
343 return -EEXIST;
340 } 344 }
341 345
342 /* Rinse, repeat */ 346 /* Rinse, repeat */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8ea14f41a979..3b67144dead2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
72 [IOMMU_RESV_DIRECT] = "direct", 72 [IOMMU_RESV_DIRECT] = "direct",
73 [IOMMU_RESV_RESERVED] = "reserved", 73 [IOMMU_RESV_RESERVED] = "reserved",
74 [IOMMU_RESV_MSI] = "msi", 74 [IOMMU_RESV_MSI] = "msi",
75 [IOMMU_RESV_SW_MSI] = "msi",
75}; 76};
76 77
77#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 78#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
1743} 1744}
1744 1745
1745struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 1746struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
1746 size_t length, 1747 size_t length, int prot,
1747 int prot, int type) 1748 enum iommu_resv_type type)
1748{ 1749{
1749 struct iommu_resv_region *region; 1750 struct iommu_resv_region *region;
1750 1751
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 125528f39e92..8162121bb1bc 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -262,6 +262,7 @@ config IRQ_MXS
262 262
263config MVEBU_ODMI 263config MVEBU_ODMI
264 bool 264 bool
265 select GENERIC_MSI_IRQ_DOMAIN
265 266
266config MVEBU_PIC 267config MVEBU_PIC
267 bool 268 bool
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
230 return -ENOMEM; 230 return -ENOMEM;
231 } 231 }
232 232
233 raw_spin_lock_init(&cd->rlock);
234
233 cd->gpc_base = of_iomap(node, 0); 235 cd->gpc_base = of_iomap(node, 0);
234 if (!cd->gpc_base) { 236 if (!cd->gpc_base) {
235 pr_err("fsl-gpcv2: unable to map gpc registers\n"); 237 pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 11d12bccc4e7..cd20df12d63d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node,
991 991
992static void __init gic_map_interrupts(struct device_node *node) 992static void __init gic_map_interrupts(struct device_node *node)
993{ 993{
994 gic_map_single_int(node, GIC_LOCAL_INT_WD);
995 gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
994 gic_map_single_int(node, GIC_LOCAL_INT_TIMER); 996 gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
995 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR); 997 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
998 gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
999 gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
996 gic_map_single_int(node, GIC_LOCAL_INT_FDC); 1000 gic_map_single_int(node, GIC_LOCAL_INT_FDC);
997} 1001}
998 1002
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 1dfd1085a04f..9ca691d6c13b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
1032 sizeof(avmb1_carddef)))) 1032 sizeof(avmb1_carddef))))
1033 return -EFAULT; 1033 return -EFAULT;
1034 cdef.cardtype = AVM_CARDTYPE_B1; 1034 cdef.cardtype = AVM_CARDTYPE_B1;
1035 cdef.cardnr = 0;
1035 } else { 1036 } else {
1036 if ((retval = copy_from_user(&cdef, data, 1037 if ((retval = copy_from_user(&cdef, data,
1037 sizeof(avmb1_extcarddef)))) 1038 sizeof(avmb1_extcarddef))))
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index e4c2c1a1e993..6735c8d6a445 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
932 *result = true; 932 *result = true;
933 933
934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, 934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
935 from_cblock(begin), &cmd->dirty_cursor); 935 from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
936 if (r) { 936 if (r) {
937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__); 937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
938 return r; 938 return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
959 return 0; 959 return 0;
960 } 960 }
961 961
962 begin = to_cblock(from_cblock(begin) + 1);
963 if (begin == end)
964 break;
965
962 r = dm_bitset_cursor_next(&cmd->dirty_cursor); 966 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
963 if (r) { 967 if (r) {
964 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__); 968 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
965 dm_bitset_cursor_end(&cmd->dirty_cursor); 969 dm_bitset_cursor_end(&cmd->dirty_cursor);
966 return r; 970 return r;
967 } 971 }
968
969 begin = to_cblock(from_cblock(begin) + 1);
970 } 972 }
971 973
972 dm_bitset_cursor_end(&cmd->dirty_cursor); 974 dm_bitset_cursor_end(&cmd->dirty_cursor);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..1e217ba84d09 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
3726 return r; 3726 return r;
3727 3727
3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ 3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && 3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { 3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, 3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0); 3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 28955b94d2b2..0b081d170087 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
755 /* Undo dm_start_request() before requeuing */ 755 /* Undo dm_start_request() before requeuing */
756 rq_end_stats(md, rq); 756 rq_end_stats(md, rq);
757 rq_completed(md, rq_data_dir(rq), false); 757 rq_completed(md, rq_data_dir(rq), false);
758 blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
758 return BLK_MQ_RQ_QUEUE_BUSY; 759 return BLK_MQ_RQ_QUEUE_BUSY;
759 } 760 }
760 761
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a3d922..78f36012eaca 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
146 block = fec_buffer_rs_block(v, fio, n, i); 146 block = fec_buffer_rs_block(v, fio, n, i);
147 res = fec_decode_rs8(v, fio, block, &par[offset], neras); 147 res = fec_decode_rs8(v, fio, block, &par[offset], neras);
148 if (res < 0) { 148 if (res < 0) {
149 dm_bufio_release(buf);
150
151 r = res; 149 r = res;
152 goto error; 150 goto error;
153 } 151 }
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
172done: 170done:
173 r = corrected; 171 r = corrected;
174error: 172error:
173 dm_bufio_release(buf);
174
175 if (r < 0 && neras) 175 if (r < 0 && neras)
176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", 176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
177 v->data_dev->name, (unsigned long long)rsb, r); 177 v->data_dev->name, (unsigned long long)rsb, r);
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
269 &is_zero) == 0) { 269 &is_zero) == 0) {
270 /* skip known zero blocks entirely */ 270 /* skip known zero blocks entirely */
271 if (is_zero) 271 if (is_zero)
272 continue; 272 goto done;
273 273
274 /* 274 /*
275 * skip if we have already found the theoretical 275 * skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
439 if (!verity_fec_is_enabled(v)) 439 if (!verity_fec_is_enabled(v))
440 return -EOPNOTSUPP; 440 return -EOPNOTSUPP;
441 441
442 if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
443 DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
444 return -EIO;
445 }
446
447 fio->level++;
448
442 if (type == DM_VERITY_BLOCK_TYPE_METADATA) 449 if (type == DM_VERITY_BLOCK_TYPE_METADATA)
443 block += v->data_blocks; 450 block += v->data_blocks;
444 451
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
470 if (r < 0) { 477 if (r < 0) {
471 r = fec_decode_rsb(v, io, fio, rsb, offset, true); 478 r = fec_decode_rsb(v, io, fio, rsb, offset, true);
472 if (r < 0) 479 if (r < 0)
473 return r; 480 goto done;
474 } 481 }
475 482
476 if (dest) 483 if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
480 r = verity_for_bv_block(v, io, iter, fec_bv_copy); 487 r = verity_for_bv_block(v, io, iter, fec_bv_copy);
481 } 488 }
482 489
490done:
491 fio->level--;
483 return r; 492 return r;
484} 493}
485 494
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
520 memset(fio->bufs, 0, sizeof(fio->bufs)); 529 memset(fio->bufs, 0, sizeof(fio->bufs));
521 fio->nbufs = 0; 530 fio->nbufs = 0;
522 fio->output = NULL; 531 fio->output = NULL;
532 fio->level = 0;
523} 533}
524 534
525/* 535/*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298b995e..bb31ce87a933 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -27,6 +27,9 @@
27#define DM_VERITY_FEC_BUF_MAX \ 27#define DM_VERITY_FEC_BUF_MAX \
28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) 28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
29 29
30/* maximum recursion level for verity_fec_decode */
31#define DM_VERITY_FEC_MAX_RECURSION 4
32
30#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" 33#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
31#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" 34#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
32#define DM_VERITY_OPT_FEC_START "fec_start" 35#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
58 unsigned nbufs; /* number of buffers allocated */ 61 unsigned nbufs; /* number of buffers allocated */
59 u8 *output; /* buffer for corrected output */ 62 u8 *output; /* buffer for corrected output */
60 size_t output_pos; 63 size_t output_pos;
64 unsigned level; /* recursion level */
61}; 65};
62 66
63#ifdef CONFIG_DM_VERITY_FEC 67#ifdef CONFIG_DM_VERITY_FEC
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 67fd8ffa60a4..669a4c82f1ff 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
321}; 321};
322MODULE_DEVICE_TABLE(of, vdoa_dt_ids); 322MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
323 323
324static const struct platform_driver vdoa_driver = { 324static struct platform_driver vdoa_driver = {
325 .probe = vdoa_probe, 325 .probe = vdoa_probe,
326 .remove = vdoa_remove, 326 .remove = vdoa_remove,
327 .driver = { 327 .driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index cbb03768f5d7..0f0c389f8897 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
861 861
862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || 862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || 863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
864 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || 864 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
866 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
867 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) 865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
868 swap(addr->cb, addr->cr); 866 swap(addr->cb, addr->cr);
869 867
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 823608112d89..7918b928f058 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
632 632
633error_ctrls: 633error_ctrls:
634 bdisp_ctrls_delete(ctx); 634 bdisp_ctrls_delete(ctx);
635error_fh:
636 v4l2_fh_del(&ctx->fh); 635 v4l2_fh_del(&ctx->fh);
636error_fh:
637 v4l2_fh_exit(&ctx->fh); 637 v4l2_fh_exit(&ctx->fh);
638 bdisp_hw_free_nodes(ctx); 638 bdisp_hw_free_nodes(ctx);
639mem_ctx: 639mem_ctx:
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index ab9866024ec7..04033efe7ad5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) 36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
37{ 37{
38 struct hexline *hx; 38 struct hexline *hx;
39 u8 reset; 39 u8 *buf;
40 int ret,pos=0; 40 int ret, pos = 0;
41 u16 cpu_cs_register = cypress[type].cpu_cs_register;
41 42
42 hx = kmalloc(sizeof(*hx), GFP_KERNEL); 43 buf = kmalloc(sizeof(*hx), GFP_KERNEL);
43 if (!hx) 44 if (!buf)
44 return -ENOMEM; 45 return -ENOMEM;
46 hx = (struct hexline *)buf;
45 47
46 /* stop the CPU */ 48 /* stop the CPU */
47 reset = 1; 49 buf[0] = 1;
48 if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) 50 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
49 err("could not stop the USB controller CPU."); 51 err("could not stop the USB controller CPU.");
50 52
51 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { 53 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
61 } 63 }
62 if (ret < 0) { 64 if (ret < 0) {
63 err("firmware download failed at %d with %d",pos,ret); 65 err("firmware download failed at %d with %d",pos,ret);
64 kfree(hx); 66 kfree(buf);
65 return ret; 67 return ret;
66 } 68 }
67 69
68 if (ret == 0) { 70 if (ret == 0) {
69 /* restart the CPU */ 71 /* restart the CPU */
70 reset = 0; 72 buf[0] = 0;
71 if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { 73 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
72 err("could not restart the USB controller CPU."); 74 err("could not restart the USB controller CPU.");
73 ret = -EINVAL; 75 ret = -EINVAL;
74 } 76 }
75 } else 77 } else
76 ret = -EIO; 78 ret = -EIO;
77 79
78 kfree(hx); 80 kfree(buf);
79 81
80 return ret; 82 return ret;
81} 83}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c361ad58..bf0fe0137dfe 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
1947 if (!of_property_read_u32(child, "dma-channel", &val)) 1947 if (!of_property_read_u32(child, "dma-channel", &val))
1948 gpmc_onenand_data->dma_channel = val; 1948 gpmc_onenand_data->dma_channel = val;
1949 1949
1950 gpmc_onenand_init(gpmc_onenand_data); 1950 return gpmc_onenand_init(gpmc_onenand_data);
1951
1952 return 0;
1953} 1951}
1954#else 1952#else
1955static int gpmc_probe_onenand_child(struct platform_device *pdev, 1953static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 91f645992c94..b27ea98b781f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
1792 1792
1793 /* If we're permanently dead, give up. */ 1793 /* If we're permanently dead, give up. */
1794 if (state == pci_channel_io_perm_failure) { 1794 if (state == pci_channel_io_perm_failure) {
1795 /* Tell the AFU drivers; but we don't care what they
1796 * say, we're going away.
1797 */
1798 for (i = 0; i < adapter->slices; i++) { 1795 for (i = 0; i < adapter->slices; i++) {
1799 afu = adapter->afu[i]; 1796 afu = adapter->afu[i];
1800 /* Only participate in EEH if we are on a virtual PHB */ 1797 /*
1801 if (afu->phb == NULL) 1798 * Tell the AFU drivers; but we don't care what they
1802 return PCI_ERS_RESULT_NONE; 1799 * say, we're going away.
1803 cxl_vphb_error_detected(afu, state); 1800 */
1801 if (afu->phb != NULL)
1802 cxl_vphb_error_detected(afu, state);
1804 } 1803 }
1805 return PCI_ERS_RESULT_DISCONNECT; 1804 return PCI_ERS_RESULT_DISCONNECT;
1806 } 1805 }
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 3600c9993a98..29f2daed37e0 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -112,11 +112,9 @@ struct mkhi_msg {
112 112
113static int mei_osver(struct mei_cl_device *cldev) 113static int mei_osver(struct mei_cl_device *cldev)
114{ 114{
115 int ret;
116 const size_t size = sizeof(struct mkhi_msg_hdr) + 115 const size_t size = sizeof(struct mkhi_msg_hdr) +
117 sizeof(struct mkhi_fwcaps) + 116 sizeof(struct mkhi_fwcaps) +
118 sizeof(struct mei_os_ver); 117 sizeof(struct mei_os_ver);
119 size_t length = 8;
120 char buf[size]; 118 char buf[size];
121 struct mkhi_msg *req; 119 struct mkhi_msg *req;
122 struct mkhi_fwcaps *fwcaps; 120 struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
137 os_ver = (struct mei_os_ver *)fwcaps->data; 135 os_ver = (struct mei_os_ver *)fwcaps->data;
138 os_ver->os_type = OSTYPE_LINUX; 136 os_ver->os_type = OSTYPE_LINUX;
139 137
140 ret = __mei_cl_send(cldev->cl, buf, size, mode); 138 return __mei_cl_send(cldev->cl, buf, size, mode);
141 if (ret < 0)
142 return ret;
143
144 ret = __mei_cl_recv(cldev->cl, buf, length, 0);
145 if (ret < 0)
146 return ret;
147
148 return 0;
149} 139}
150 140
151static void mei_mkhi_fix(struct mei_cl_device *cldev) 141static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
160 return; 150 return;
161 151
162 ret = mei_osver(cldev); 152 ret = mei_osver(cldev);
163 if (ret) 153 if (ret < 0)
164 dev_err(&cldev->dev, "OS version command failed %d\n", ret); 154 dev_err(&cldev->dev, "OS version command failed %d\n", ret);
165 155
166 mei_cldev_disable(cldev); 156 mei_cldev_disable(cldev);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cfb1cdf176fa..13c55b8f9261 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
124 124
125 mei_clear_interrupts(dev); 125 mei_clear_interrupts(dev);
126 126
127 mei_synchronize_irq(dev);
128
129 /* we're already in reset, cancel the init timer 127 /* we're already in reset, cancel the init timer
130 * if the reset was called due the hbm protocol error 128 * if the reset was called due the hbm protocol error
131 * we need to call it before hw start 129 * we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
304 container_of(work, struct mei_device, reset_work); 302 container_of(work, struct mei_device, reset_work);
305 int ret; 303 int ret;
306 304
305 mei_clear_interrupts(dev);
306 mei_synchronize_irq(dev);
307
307 mutex_lock(&dev->device_lock); 308 mutex_lock(&dev->device_lock);
308 309
309 ret = mei_reset(dev); 310 ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
328 329
329 mei_cancel_work(dev); 330 mei_cancel_work(dev);
330 331
332 mei_clear_interrupts(dev);
333 mei_synchronize_irq(dev);
334
331 mutex_lock(&dev->device_lock); 335 mutex_lock(&dev->device_lock);
332 336
333 dev->dev_state = MEI_DEV_POWER_DOWN; 337 dev->dev_state = MEI_DEV_POWER_DOWN;
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 9d659542a335..dad5abee656e 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
566 */ 566 */
567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
568 PCI_IRQ_MSIX); 568 PCI_IRQ_MSIX);
569 if (error) { 569 if (error < 0) {
570 error = pci_alloc_irq_vectors(pdev, 1, 1, 570 error = pci_alloc_irq_vectors(pdev, 1, 1,
571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
572 if (error) 572 if (error < 0)
573 goto err_remove_bitmap; 573 goto err_remove_bitmap;
574 } else { 574 } else {
575 vmci_dev->exclusive_vectors = true; 575 vmci_dev->exclusive_vectors = true;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1621fa08e206..ff3da960c473 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1560 struct mmc_blk_request *brq, struct request *req, 1560 struct mmc_blk_request *brq, struct request *req,
1561 bool old_req_pending) 1561 bool old_req_pending)
1562{ 1562{
1563 struct mmc_queue_req *mq_rq;
1564 bool req_pending; 1563 bool req_pending;
1565 1564
1566 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1567
1568 /* 1565 /*
1569 * If this is an SD card and we're writing, we can first 1566 * If this is an SD card and we're writing, we can first
1570 * mark the known good sectors as ok. 1567 * mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1701 case MMC_BLK_CMD_ERR: 1698 case MMC_BLK_CMD_ERR:
1702 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); 1699 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
1703 if (mmc_blk_reset(md, card->host, type)) { 1700 if (mmc_blk_reset(md, card->host, type)) {
1704 mmc_blk_rw_cmd_abort(card, old_req); 1701 if (req_pending)
1702 mmc_blk_rw_cmd_abort(card, old_req);
1705 mmc_blk_rw_try_restart(mq, new_req); 1703 mmc_blk_rw_try_restart(mq, new_req);
1706 return; 1704 return;
1707 } 1705 }
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1817 mmc_blk_issue_flush(mq, req); 1815 mmc_blk_issue_flush(mq, req);
1818 } else { 1816 } else {
1819 mmc_blk_issue_rw_rq(mq, req); 1817 mmc_blk_issue_rw_rq(mq, req);
1818 card->host->context_info.is_waiting_last_req = false;
1820 } 1819 }
1821 1820
1822out: 1821out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7fd722868875..b502601df228 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1730 err = mmc_select_hs400(card); 1730 err = mmc_select_hs400(card);
1731 if (err) 1731 if (err)
1732 goto free_card; 1732 goto free_card;
1733 } else { 1733 } else if (!mmc_card_hs400es(card)) {
1734 /* Select the desired bus width optionally */ 1734 /* Select the desired bus width optionally */
1735 err = mmc_select_bus_width(card); 1735 err = mmc_select_bus_width(card);
1736 if (err > 0 && mmc_card_hs(card)) { 1736 if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e992a7f8a16f..2b32b88949ba 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
267 sdio_free_func_cis(func); 267 sdio_free_func_cis(func);
268 268
269 kfree(func->info); 269 kfree(func->info);
270 270 kfree(func->tmpbuf);
271 kfree(func); 271 kfree(func);
272} 272}
273 273
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
282 if (!func) 282 if (!func)
283 return ERR_PTR(-ENOMEM); 283 return ERR_PTR(-ENOMEM);
284 284
285 /*
286 * allocate buffer separately to make sure it's properly aligned for
287 * DMA usage (incl. 64 bit DMA)
288 */
289 func->tmpbuf = kmalloc(4, GFP_KERNEL);
290 if (!func->tmpbuf) {
291 kfree(func);
292 return ERR_PTR(-ENOMEM);
293 }
294
285 func->card = card; 295 func->card = card;
286 296
287 device_initialize(&func->dev); 297 device_initialize(&func->dev);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9ac0b457313..8718432751c5 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -22,6 +22,7 @@
22#include <linux/ioport.h> 22#include <linux/ioport.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/pm_runtime.h>
25#include <linux/seq_file.h> 26#include <linux/seq_file.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/stat.h> 28#include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1621 1622
1622 if (card->type == MMC_TYPE_SDIO || 1623 if (card->type == MMC_TYPE_SDIO ||
1623 card->type == MMC_TYPE_SD_COMBO) { 1624 card->type == MMC_TYPE_SD_COMBO) {
1624 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1625 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
1626 pm_runtime_get_noresume(mmc->parent);
1627 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1628 }
1625 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1629 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1626 } else { 1630 } else {
1627 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1631 if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
1632 pm_runtime_put_noidle(mmc->parent);
1633 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1634 }
1628 clk_en_a = clk_en_a_old | clken_low_pwr; 1635 clk_en_a = clk_en_a_old | clken_low_pwr;
1629 } 1636 }
1630 1637
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8e32580c12b5..b235d8da0602 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
580 } 580 }
581 } 581 }
582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, 582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
583 (mode << 8) | (div % 0xff)); 583 (mode << 8) | div);
584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) 585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
586 cpu_relax(); 586 cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1559 host->src_clk_freq = clk_get_rate(host->src_clk); 1559 host->src_clk_freq = clk_get_rate(host->src_clk);
1560 /* Set host parameters to mmc */ 1560 /* Set host parameters to mmc */
1561 mmc->ops = &mt_msdc_ops; 1561 mmc->ops = &mt_msdc_ops;
1562 mmc->f_min = host->src_clk_freq / (4 * 255); 1562 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
1563 1563
1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; 1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
1565 /* MMC core transfer sizes tunable parameters */ 1565 /* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 7123ef96ed18..445fc47dc3e7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
830 830
831 switch (uhs) { 831 switch (uhs) {
832 case MMC_TIMING_UHS_SDR50: 832 case MMC_TIMING_UHS_SDR50:
833 case MMC_TIMING_UHS_DDR50:
833 pinctrl = imx_data->pins_100mhz; 834 pinctrl = imx_data->pins_100mhz;
834 break; 835 break;
835 case MMC_TIMING_UHS_SDR104: 836 case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b1c25f..1cfd7f900339 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
28#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
32#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 31#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
33 32
34#define VENDOR_ENHANCED_STROBE BIT(0) 33#define VENDOR_ENHANCED_STROBE BIT(0)
35#define CLK_CTRL_TIMEOUT_SHIFT 16
36#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
37#define CLK_CTRL_TIMEOUT_MIN_EXP 13
38 34
39#define PHY_CLK_TOO_SLOW_HZ 400000 35#define PHY_CLK_TOO_SLOW_HZ 400000
40 36
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
163 159
164static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) 160static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
165{ 161{
166 u32 div;
167 unsigned long freq; 162 unsigned long freq;
168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
169 164
170 div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); 165 /* SDHCI timeout clock is in kHz */
171 div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; 166 freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
172 167
173 freq = clk_get_rate(pltfm_host->clk); 168 /* or in MHz */
174 freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); 169 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
170 freq = DIV_ROUND_UP(freq, 1000);
175 171
176 return freq; 172 return freq;
177} 173}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2f9ad213377a..d5430ed02a67 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
29 29
30#include "sdhci-pltfm.h" 30#include "sdhci-pltfm.h"
31 31
32#define SDMMC_MC1R 0x204
33#define SDMMC_MC1R_DDR BIT(3)
32#define SDMMC_CACR 0x230 34#define SDMMC_CACR 0x230
33#define SDMMC_CACR_CAPWREN BIT(0) 35#define SDMMC_CACR_CAPWREN BIT(0)
34#define SDMMC_CACR_KEY (0x46 << 8) 36#define SDMMC_CACR_KEY (0x46 << 8)
@@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
85 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 87 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
86} 88}
87 89
90/*
91 * In this specific implementation of the SDHCI controller, the power register
92 * needs to have a valid voltage set even when the power supply is managed by
93 * an external regulator.
94 */
95static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
96 unsigned short vdd)
97{
98 if (!IS_ERR(host->mmc->supply.vmmc)) {
99 struct mmc_host *mmc = host->mmc;
100
101 spin_unlock_irq(&host->lock);
102 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
103 spin_lock_irq(&host->lock);
104 }
105 sdhci_set_power_noreg(host, mode, vdd);
106}
107
108void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
109{
110 if (timing == MMC_TIMING_MMC_DDR52)
111 sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
112 sdhci_set_uhs_signaling(host, timing);
113}
114
88static const struct sdhci_ops sdhci_at91_sama5d2_ops = { 115static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
89 .set_clock = sdhci_at91_set_clock, 116 .set_clock = sdhci_at91_set_clock,
90 .set_bus_width = sdhci_set_bus_width, 117 .set_bus_width = sdhci_set_bus_width,
91 .reset = sdhci_reset, 118 .reset = sdhci_reset,
92 .set_uhs_signaling = sdhci_set_uhs_signaling, 119 .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
120 .set_power = sdhci_at91_set_power,
93}; 121};
94 122
95static const struct sdhci_pltfm_data soc_data_sama5d2 = { 123static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 982b3e349426..86560d590786 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
451 if (mode == MMC_POWER_OFF) 451 if (mode == MMC_POWER_OFF)
452 return; 452 return;
453 453
454 spin_unlock_irq(&host->lock);
455
454 /* 456 /*
455 * Bus power might not enable after D3 -> D0 transition due to the 457 * Bus power might not enable after D3 -> D0 transition due to the
456 * present state not yet having propagated. Retry for up to 2ms. 458 * present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
463 reg |= SDHCI_POWER_ON; 465 reg |= SDHCI_POWER_ON;
464 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 466 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
465 } 467 }
468
469 spin_lock_irq(&host->lock);
466} 470}
467 471
468static const struct sdhci_ops sdhci_intel_byt_ops = { 472static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6fdd7a70f229..63bc33a54d0d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1362 return; 1362 return;
1363 } 1363 }
1364 timeout--; 1364 timeout--;
1365 mdelay(1); 1365 spin_unlock_irq(&host->lock);
1366 usleep_range(900, 1100);
1367 spin_lock_irq(&host->lock);
1366 } 1368 }
1367 1369
1368 clk |= SDHCI_CLOCK_CARD_EN; 1370 clk |= SDHCI_CLOCK_CARD_EN;
@@ -1828,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1828 struct sdhci_host *host = mmc_priv(mmc); 1830 struct sdhci_host *host = mmc_priv(mmc);
1829 unsigned long flags; 1831 unsigned long flags;
1830 1832
1833 if (enable)
1834 pm_runtime_get_noresume(host->mmc->parent);
1835
1831 spin_lock_irqsave(&host->lock, flags); 1836 spin_lock_irqsave(&host->lock, flags);
1832 if (enable) 1837 if (enable)
1833 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1838 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1836,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1836 1841
1837 sdhci_enable_sdio_irq_nolock(host, enable); 1842 sdhci_enable_sdio_irq_nolock(host, enable);
1838 spin_unlock_irqrestore(&host->lock, flags); 1843 spin_unlock_irqrestore(&host->lock, flags);
1844
1845 if (!enable)
1846 pm_runtime_put_noidle(host->mmc->parent);
1839} 1847}
1840 1848
1841static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1849static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f09d69..1d843357422e 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
426 struct ushc_data *ushc; 426 struct ushc_data *ushc;
427 int ret; 427 int ret;
428 428
429 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
430 return -ENODEV;
431
429 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); 432 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
430 if (mmc == NULL) 433 if (mmc == NULL)
431 return -ENOMEM; 434 return -ENOMEM;
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba32a057..39712560b4c1 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
148 return err; 148 return err;
149 } 149 }
150 150
151 if (bytes == 0) { 151 err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
152 err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); 152 if (err)
153 if (err) 153 return err;
154 return err;
155 154
155 if (bytes == 0) {
156 err = clear_update_marker(ubi, vol, 0); 156 err = clear_update_marker(ubi, vol, 0);
157 if (err) 157 if (err)
158 return err; 158 return err;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 138f5ae75c0b..4d1fe8d95042 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
557 int work_done = 0; 557 int work_done = 0;
558 558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); 559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); 560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
562 562
563 /* Handle bus state changes */ 563 /* Handle bus state changes */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index caed4e6960f8..11662f479e76 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
826 826
827 devm_can_led_init(ndev); 827 devm_can_led_init(ndev);
828 828
829 dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", 829 dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
830 priv->regs, ndev->irq);
831 830
832 return 0; 831 return 0;
833fail_candev: 832fail_candev:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 8a280e7d66bd..127adbeefb10 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -984,29 +984,29 @@
984#define XP_ECC_CNT1_DESC_DED_WIDTH 8 984#define XP_ECC_CNT1_DESC_DED_WIDTH 8
985#define XP_ECC_CNT1_DESC_SEC_INDEX 0 985#define XP_ECC_CNT1_DESC_SEC_INDEX 0
986#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 986#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
987#define XP_ECC_IER_DESC_DED_INDEX 0 987#define XP_ECC_IER_DESC_DED_INDEX 5
988#define XP_ECC_IER_DESC_DED_WIDTH 1 988#define XP_ECC_IER_DESC_DED_WIDTH 1
989#define XP_ECC_IER_DESC_SEC_INDEX 1 989#define XP_ECC_IER_DESC_SEC_INDEX 4
990#define XP_ECC_IER_DESC_SEC_WIDTH 1 990#define XP_ECC_IER_DESC_SEC_WIDTH 1
991#define XP_ECC_IER_RX_DED_INDEX 2 991#define XP_ECC_IER_RX_DED_INDEX 3
992#define XP_ECC_IER_RX_DED_WIDTH 1 992#define XP_ECC_IER_RX_DED_WIDTH 1
993#define XP_ECC_IER_RX_SEC_INDEX 3 993#define XP_ECC_IER_RX_SEC_INDEX 2
994#define XP_ECC_IER_RX_SEC_WIDTH 1 994#define XP_ECC_IER_RX_SEC_WIDTH 1
995#define XP_ECC_IER_TX_DED_INDEX 4 995#define XP_ECC_IER_TX_DED_INDEX 1
996#define XP_ECC_IER_TX_DED_WIDTH 1 996#define XP_ECC_IER_TX_DED_WIDTH 1
997#define XP_ECC_IER_TX_SEC_INDEX 5 997#define XP_ECC_IER_TX_SEC_INDEX 0
998#define XP_ECC_IER_TX_SEC_WIDTH 1 998#define XP_ECC_IER_TX_SEC_WIDTH 1
999#define XP_ECC_ISR_DESC_DED_INDEX 0 999#define XP_ECC_ISR_DESC_DED_INDEX 5
1000#define XP_ECC_ISR_DESC_DED_WIDTH 1 1000#define XP_ECC_ISR_DESC_DED_WIDTH 1
1001#define XP_ECC_ISR_DESC_SEC_INDEX 1 1001#define XP_ECC_ISR_DESC_SEC_INDEX 4
1002#define XP_ECC_ISR_DESC_SEC_WIDTH 1 1002#define XP_ECC_ISR_DESC_SEC_WIDTH 1
1003#define XP_ECC_ISR_RX_DED_INDEX 2 1003#define XP_ECC_ISR_RX_DED_INDEX 3
1004#define XP_ECC_ISR_RX_DED_WIDTH 1 1004#define XP_ECC_ISR_RX_DED_WIDTH 1
1005#define XP_ECC_ISR_RX_SEC_INDEX 3 1005#define XP_ECC_ISR_RX_SEC_INDEX 2
1006#define XP_ECC_ISR_RX_SEC_WIDTH 1 1006#define XP_ECC_ISR_RX_SEC_WIDTH 1
1007#define XP_ECC_ISR_TX_DED_INDEX 4 1007#define XP_ECC_ISR_TX_DED_INDEX 1
1008#define XP_ECC_ISR_TX_DED_WIDTH 1 1008#define XP_ECC_ISR_TX_DED_WIDTH 1
1009#define XP_ECC_ISR_TX_SEC_INDEX 5 1009#define XP_ECC_ISR_TX_SEC_INDEX 0
1010#define XP_ECC_ISR_TX_SEC_WIDTH 1 1010#define XP_ECC_ISR_TX_SEC_WIDTH 1
1011#define XP_I2C_MUTEX_BUSY_INDEX 31 1011#define XP_I2C_MUTEX_BUSY_INDEX 31
1012#define XP_I2C_MUTEX_BUSY_WIDTH 1 1012#define XP_I2C_MUTEX_BUSY_WIDTH 1
@@ -1148,8 +1148,8 @@
1148#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 1148#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
1149#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 1149#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
1150#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 1150#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
1151#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 1151#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
1152#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 1152#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
1153#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 1153#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
1154#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 1154#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
1155#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 1155#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -1158,6 +1158,8 @@
1158#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 1158#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
1159#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 1159#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
1160#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 1160#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
1161#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
1162#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
1161 1163
1162#define RX_NORMAL_DESC0_OVT_INDEX 0 1164#define RX_NORMAL_DESC0_OVT_INDEX 0
1163#define RX_NORMAL_DESC0_OVT_WIDTH 16 1165#define RX_NORMAL_DESC0_OVT_WIDTH 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 937f37a5dcb2..24a687ce4388 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
1896 1896
1897 /* Get the header length */ 1897 /* Get the header length */
1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1899 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1900 FIRST, 1);
1899 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1901 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1900 RX_NORMAL_DESC2, HL); 1902 RX_NORMAL_DESC2, HL);
1901 if (rdata->rx.hdr_len) 1903 if (rdata->rx.hdr_len)
1902 pdata->ext_stats.rx_split_header_packets++; 1904 pdata->ext_stats.rx_split_header_packets++;
1905 } else {
1906 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1907 FIRST, 0);
1903 } 1908 }
1904 1909
1905 /* Get the RSS hash */ 1910 /* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
1922 } 1927 }
1923 } 1928 }
1924 1929
1925 /* Get the packet length */ 1930 /* Not all the data has been transferred for this packet */
1926 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1931 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1927
1928 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1929 /* Not all the data has been transferred for this packet */
1930 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1931 INCOMPLETE, 1);
1932 return 0; 1932 return 0;
1933 }
1934 1933
1935 /* This is the last of the data for this packet */ 1934 /* This is the last of the data for this packet */
1936 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1935 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1937 INCOMPLETE, 0); 1936 LAST, 1);
1937
1938 /* Get the packet length */
1939 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1938 1940
1939 /* Set checksum done indicator as appropriate */ 1941 /* Set checksum done indicator as appropriate */
1940 if (netdev->features & NETIF_F_RXCSUM) 1942 if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ffea9859f5a7..a713abd9d03e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1971{ 1971{
1972 struct sk_buff *skb; 1972 struct sk_buff *skb;
1973 u8 *packet; 1973 u8 *packet;
1974 unsigned int copy_len;
1975 1974
1976 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); 1975 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1977 if (!skb) 1976 if (!skb)
1978 return NULL; 1977 return NULL;
1979 1978
1980 /* Start with the header buffer which may contain just the header 1979 /* Pull in the header buffer which may contain just the header
1981 * or the header plus data 1980 * or the header plus data
1982 */ 1981 */
1983 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, 1982 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1986 1985
1987 packet = page_address(rdata->rx.hdr.pa.pages) + 1986 packet = page_address(rdata->rx.hdr.pa.pages) +
1988 rdata->rx.hdr.pa.pages_offset; 1987 rdata->rx.hdr.pa.pages_offset;
1989 copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; 1988 skb_copy_to_linear_data(skb, packet, len);
1990 copy_len = min(rdata->rx.hdr.dma_len, copy_len); 1989 skb_put(skb, len);
1991 skb_copy_to_linear_data(skb, packet, copy_len);
1992 skb_put(skb, copy_len);
1993
1994 len -= copy_len;
1995 if (len) {
1996 /* Add the remaining data as a frag */
1997 dma_sync_single_range_for_cpu(pdata->dev,
1998 rdata->rx.buf.dma_base,
1999 rdata->rx.buf.dma_off,
2000 rdata->rx.buf.dma_len,
2001 DMA_FROM_DEVICE);
2002
2003 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2004 rdata->rx.buf.pa.pages,
2005 rdata->rx.buf.pa.pages_offset,
2006 len, rdata->rx.buf.dma_len);
2007 rdata->rx.buf.pa.pages = NULL;
2008 }
2009 1990
2010 return skb; 1991 return skb;
2011} 1992}
2012 1993
1994static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
1995 struct xgbe_packet_data *packet)
1996{
1997 /* Always zero if not the first descriptor */
1998 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
1999 return 0;
2000
2001 /* First descriptor with split header, return header length */
2002 if (rdata->rx.hdr_len)
2003 return rdata->rx.hdr_len;
2004
2005 /* First descriptor but not the last descriptor and no split header,
2006 * so the full buffer was used
2007 */
2008 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2009 return rdata->rx.hdr.dma_len;
2010
2011 /* First descriptor and last descriptor and no split header, so
2012 * calculate how much of the buffer was used
2013 */
2014 return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
2015}
2016
2017static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
2018 struct xgbe_packet_data *packet,
2019 unsigned int len)
2020{
2021 /* Always the full buffer if not the last descriptor */
2022 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2023 return rdata->rx.buf.dma_len;
2024
2025 /* Last descriptor so calculate how much of the buffer was used
2026 * for the last bit of data
2027 */
2028 return rdata->rx.len - len;
2029}
2030
2013static int xgbe_tx_poll(struct xgbe_channel *channel) 2031static int xgbe_tx_poll(struct xgbe_channel *channel)
2014{ 2032{
2015 struct xgbe_prv_data *pdata = channel->pdata; 2033 struct xgbe_prv_data *pdata = channel->pdata;
@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2092 struct napi_struct *napi; 2110 struct napi_struct *napi;
2093 struct sk_buff *skb; 2111 struct sk_buff *skb;
2094 struct skb_shared_hwtstamps *hwtstamps; 2112 struct skb_shared_hwtstamps *hwtstamps;
2095 unsigned int incomplete, error, context_next, context; 2113 unsigned int last, error, context_next, context;
2096 unsigned int len, rdesc_len, max_len; 2114 unsigned int len, buf1_len, buf2_len, max_len;
2097 unsigned int received = 0; 2115 unsigned int received = 0;
2098 int packet_count = 0; 2116 int packet_count = 0;
2099 2117
@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2103 if (!ring) 2121 if (!ring)
2104 return 0; 2122 return 0;
2105 2123
2106 incomplete = 0; 2124 last = 0;
2107 context_next = 0; 2125 context_next = 0;
2108 2126
2109 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; 2127 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -2137,9 +2155,8 @@ read_again:
2137 received++; 2155 received++;
2138 ring->cur++; 2156 ring->cur++;
2139 2157
2140 incomplete = XGMAC_GET_BITS(packet->attributes, 2158 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2141 RX_PACKET_ATTRIBUTES, 2159 LAST);
2142 INCOMPLETE);
2143 context_next = XGMAC_GET_BITS(packet->attributes, 2160 context_next = XGMAC_GET_BITS(packet->attributes,
2144 RX_PACKET_ATTRIBUTES, 2161 RX_PACKET_ATTRIBUTES,
2145 CONTEXT_NEXT); 2162 CONTEXT_NEXT);
@@ -2148,7 +2165,7 @@ read_again:
2148 CONTEXT); 2165 CONTEXT);
2149 2166
2150 /* Earlier error, just drain the remaining data */ 2167 /* Earlier error, just drain the remaining data */
2151 if ((incomplete || context_next) && error) 2168 if ((!last || context_next) && error)
2152 goto read_again; 2169 goto read_again;
2153 2170
2154 if (error || packet->errors) { 2171 if (error || packet->errors) {
@@ -2160,16 +2177,22 @@ read_again:
2160 } 2177 }
2161 2178
2162 if (!context) { 2179 if (!context) {
2163 /* Length is cumulative, get this descriptor's length */ 2180 /* Get the data length in the descriptor buffers */
2164 rdesc_len = rdata->rx.len - len; 2181 buf1_len = xgbe_rx_buf1_len(rdata, packet);
2165 len += rdesc_len; 2182 len += buf1_len;
2183 buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
2184 len += buf2_len;
2166 2185
2167 if (rdesc_len && !skb) { 2186 if (!skb) {
2168 skb = xgbe_create_skb(pdata, napi, rdata, 2187 skb = xgbe_create_skb(pdata, napi, rdata,
2169 rdesc_len); 2188 buf1_len);
2170 if (!skb) 2189 if (!skb) {
2171 error = 1; 2190 error = 1;
2172 } else if (rdesc_len) { 2191 goto skip_data;
2192 }
2193 }
2194
2195 if (buf2_len) {
2173 dma_sync_single_range_for_cpu(pdata->dev, 2196 dma_sync_single_range_for_cpu(pdata->dev,
2174 rdata->rx.buf.dma_base, 2197 rdata->rx.buf.dma_base,
2175 rdata->rx.buf.dma_off, 2198 rdata->rx.buf.dma_off,
@@ -2179,13 +2202,14 @@ read_again:
2179 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2202 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2180 rdata->rx.buf.pa.pages, 2203 rdata->rx.buf.pa.pages,
2181 rdata->rx.buf.pa.pages_offset, 2204 rdata->rx.buf.pa.pages_offset,
2182 rdesc_len, 2205 buf2_len,
2183 rdata->rx.buf.dma_len); 2206 rdata->rx.buf.dma_len);
2184 rdata->rx.buf.pa.pages = NULL; 2207 rdata->rx.buf.pa.pages = NULL;
2185 } 2208 }
2186 } 2209 }
2187 2210
2188 if (incomplete || context_next) 2211skip_data:
2212 if (!last || context_next)
2189 goto read_again; 2213 goto read_again;
2190 2214
2191 if (!skb) 2215 if (!skb)
@@ -2243,7 +2267,7 @@ next_packet:
2243 } 2267 }
2244 2268
2245 /* Check if we need to save state before leaving */ 2269 /* Check if we need to save state before leaving */
2246 if (received && (incomplete || context_next)) { 2270 if (received && (!last || context_next)) {
2247 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 2271 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2248 rdata->state_saved = 1; 2272 rdata->state_saved = 1;
2249 rdata->state.skb = skb; 2273 rdata->state.skb = skb;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index dad63623be6a..5d6c40d86775 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -98,11 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
98 98
99 if (err < 0) 99 if (err < 0)
100 goto err_exit; 100 goto err_exit;
101 101 ndev->mtu = new_mtu;
102 if (netif_running(ndev)) {
103 aq_ndev_close(ndev);
104 aq_ndev_open(ndev);
105 }
106 102
107err_exit: 103err_exit:
108 return err; 104 return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ee78444bfb88..cdb02991f249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
487 dx_buff->mss = skb_shinfo(skb)->gso_size; 487 dx_buff->mss = skb_shinfo(skb)->gso_size;
488 dx_buff->is_txc = 1U; 488 dx_buff->is_txc = 1U;
489 489
490 dx_buff->is_ipv6 =
491 (ip_hdr(skb)->version == 6) ? 1U : 0U;
492
490 dx = aq_ring_next_dx(ring, dx); 493 dx = aq_ring_next_dx(ring, dx);
491 dx_buff = &ring->buff_ring[dx]; 494 dx_buff = &ring->buff_ring[dx];
492 ++ret; 495 ++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
510 if (skb->ip_summed == CHECKSUM_PARTIAL) { 513 if (skb->ip_summed == CHECKSUM_PARTIAL) {
511 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 514 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
512 1U : 0U; 515 1U : 0U;
513 dx_buff->is_tcp_cso = 516
514 (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; 517 if (ip_hdr(skb)->version == 4) {
515 dx_buff->is_udp_cso = 518 dx_buff->is_tcp_cso =
516 (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; 519 (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
520 1U : 0U;
521 dx_buff->is_udp_cso =
522 (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
523 1U : 0U;
524 } else if (ip_hdr(skb)->version == 6) {
525 dx_buff->is_tcp_cso =
526 (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
527 1U : 0U;
528 dx_buff->is_udp_cso =
529 (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
530 1U : 0U;
531 }
517 } 532 }
518 533
519 for (; nr_frags--; ++frag_count) { 534 for (; nr_frags--; ++frag_count) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0358e6072d45..3a8a4aa13687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
101 self->hw_head = 0; 101 self->hw_head = 0;
102 self->sw_head = 0; 102 self->sw_head = 0;
103 self->sw_tail = 0; 103 self->sw_tail = 0;
104 spin_lock_init(&self->header.lock);
104 return 0; 105 return 0;
105} 106}
106 107
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 257254645068..eecd6d1c4d73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
58 u8 len_l2; 58 u8 len_l2;
59 u8 len_l3; 59 u8 len_l3;
60 u8 len_l4; 60 u8 len_l4;
61 u8 rsvd2; 61 u8 is_ipv6:1;
62 u8 rsvd2:7;
62 u32 len_pkt; 63 u32 len_pkt;
63 }; 64 };
64 }; 65 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index a2b746a2dd50..4ee15ff06a44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
433 buff->len_l3 + 433 buff->len_l3 +
434 buff->len_l2); 434 buff->len_l2);
435 is_gso = true; 435 is_gso = true;
436
437 if (buff->is_ipv6)
438 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
436 } else { 439 } else {
437 buff_pa_len = buff->len; 440 buff_pa_len = buff->len;
438 441
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
458 if (unlikely(buff->is_eop)) { 461 if (unlikely(buff->is_eop)) {
459 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; 462 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
460 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; 463 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
464 is_gso = false;
461 } 465 }
462 } 466 }
463 467
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1093ea18823a..0592a0330cf0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
137 .tx_rings = HW_ATL_A0_TX_RINGS, 137 .tx_rings = HW_ATL_A0_TX_RINGS,
138 .rx_rings = HW_ATL_A0_RX_RINGS, 138 .rx_rings = HW_ATL_A0_RX_RINGS,
139 .hw_features = NETIF_F_HW_CSUM | 139 .hw_features = NETIF_F_HW_CSUM |
140 NETIF_F_RXCSUM |
140 NETIF_F_RXHASH | 141 NETIF_F_RXHASH |
141 NETIF_F_SG | 142 NETIF_F_SG |
142 NETIF_F_TSO, 143 NETIF_F_TSO,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cab2931dab9a..42150708191d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
471 buff->len_l3 + 471 buff->len_l3 +
472 buff->len_l2); 472 buff->len_l2);
473 is_gso = true; 473 is_gso = true;
474
475 if (buff->is_ipv6)
476 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
474 } else { 477 } else {
475 buff_pa_len = buff->len; 478 buff_pa_len = buff->len;
476 479
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
496 if (unlikely(buff->is_eop)) { 499 if (unlikely(buff->is_eop)) {
497 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; 500 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
498 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; 501 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
502 is_gso = false;
499 } 503 }
500 } 504 }
501 505
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 8bdee3ddd5a0..f3957e930340 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
188 .tx_rings = HW_ATL_B0_TX_RINGS, 188 .tx_rings = HW_ATL_B0_TX_RINGS,
189 .rx_rings = HW_ATL_B0_RX_RINGS, 189 .rx_rings = HW_ATL_B0_RX_RINGS,
190 .hw_features = NETIF_F_HW_CSUM | 190 .hw_features = NETIF_F_HW_CSUM |
191 NETIF_F_RXCSUM |
191 NETIF_F_RXHASH | 192 NETIF_F_RXHASH |
192 NETIF_F_SG | 193 NETIF_F_SG |
193 NETIF_F_TSO | 194 NETIF_F_TSO |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0a23034bbe3f..352beff796ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ 2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) 2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
2279 2279
2280#define HW_INTERRUT_ASSERT_SET_0 \ 2280#define HW_INTERRUPT_ASSERT_SET_0 \
2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ 2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ 2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ 2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ 2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ 2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) 2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
2293#define HW_INTERRUT_ASSERT_SET_1 \ 2293#define HW_INTERRUPT_ASSERT_SET_1 \
2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ 2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ 2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ 2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ 2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ 2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) 2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
2321#define HW_INTERRUT_ASSERT_SET_2 \ 2321#define HW_INTERRUPT_ASSERT_SET_2 \
2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ 2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ 2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ 2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ac76fc251d26..a851f95c307a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4166 bnx2x_release_phy_lock(bp); 4166 bnx2x_release_phy_lock(bp);
4167 } 4167 }
4168 4168
4169 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4169 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4170 4170
4171 val = REG_RD(bp, reg_offset); 4171 val = REG_RD(bp, reg_offset);
4172 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4172 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4173 REG_WR(bp, reg_offset, val); 4173 REG_WR(bp, reg_offset, val);
4174 4174
4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4176 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 4176 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4177 bnx2x_panic(); 4177 bnx2x_panic();
4178 } 4178 }
4179} 4179}
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4191 BNX2X_ERR("FATAL error from DORQ\n"); 4191 BNX2X_ERR("FATAL error from DORQ\n");
4192 } 4192 }
4193 4193
4194 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4194 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4195 4195
4196 int port = BP_PORT(bp); 4196 int port = BP_PORT(bp);
4197 int reg_offset; 4197 int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4201 4201
4202 val = REG_RD(bp, reg_offset); 4202 val = REG_RD(bp, reg_offset);
4203 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4203 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4204 REG_WR(bp, reg_offset, val); 4204 REG_WR(bp, reg_offset, val);
4205 4205
4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4207 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 4207 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4208 bnx2x_panic(); 4208 bnx2x_panic();
4209 } 4209 }
4210} 4210}
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235 } 4235 }
4236 } 4236 }
4237 4237
4238 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4238 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4239 4239
4240 int port = BP_PORT(bp); 4240 int port = BP_PORT(bp);
4241 int reg_offset; 4241 int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4245 4245
4246 val = REG_RD(bp, reg_offset); 4246 val = REG_RD(bp, reg_offset);
4247 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4247 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4248 REG_WR(bp, reg_offset, val); 4248 REG_WR(bp, reg_offset, val);
4249 4249
4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4251 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 4251 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4252 bnx2x_panic(); 4252 bnx2x_panic();
4253 } 4253 }
4254} 4254}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 32de4589d16a..1f1e54ba0ecb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
1983 1983
1984 for (j = 0; j < max_idx; j++) { 1984 for (j = 0; j < max_idx; j++) {
1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1986 dma_addr_t mapping = rx_buf->mapping;
1986 void *data = rx_buf->data; 1987 void *data = rx_buf->data;
1987 1988
1988 if (!data) 1989 if (!data)
1989 continue; 1990 continue;
1990 1991
1991 dma_unmap_single(&pdev->dev, rx_buf->mapping,
1992 bp->rx_buf_use_size, bp->rx_dir);
1993
1994 rx_buf->data = NULL; 1992 rx_buf->data = NULL;
1995 1993
1996 if (BNXT_RX_PAGE_MODE(bp)) 1994 if (BNXT_RX_PAGE_MODE(bp)) {
1995 mapping -= bp->rx_dma_offset;
1996 dma_unmap_page(&pdev->dev, mapping,
1997 PAGE_SIZE, bp->rx_dir);
1997 __free_page(data); 1998 __free_page(data);
1998 else 1999 } else {
2000 dma_unmap_single(&pdev->dev, mapping,
2001 bp->rx_buf_use_size,
2002 bp->rx_dir);
1999 kfree(data); 2003 kfree(data);
2004 }
2000 } 2005 }
2001 2006
2002 for (j = 0; j < max_agg_idx; j++) { 2007 for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2455 return 0; 2460 return 0;
2456} 2461}
2457 2462
2463static void bnxt_init_cp_rings(struct bnxt *bp)
2464{
2465 int i;
2466
2467 for (i = 0; i < bp->cp_nr_rings; i++) {
2468 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2469 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2470
2471 ring->fw_ring_id = INVALID_HW_RING_ID;
2472 }
2473}
2474
2458static int bnxt_init_rx_rings(struct bnxt *bp) 2475static int bnxt_init_rx_rings(struct bnxt *bp)
2459{ 2476{
2460 int i, rc = 0; 2477 int i, rc = 0;
@@ -4732,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4732 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4749 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4733 if (rc) { 4750 if (rc) {
4734 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4751 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4735 rc, i); 4752 i, rc);
4736 return rc; 4753 return rc;
4737 } 4754 }
4738 } 4755 }
@@ -5006,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5006 5023
5007static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5024static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5008{ 5025{
5026 bnxt_init_cp_rings(bp);
5009 bnxt_init_rx_rings(bp); 5027 bnxt_init_rx_rings(bp);
5010 bnxt_init_tx_rings(bp); 5028 bnxt_init_tx_rings(bp);
5011 bnxt_init_ring_grps(bp, irq_re_init); 5029 bnxt_init_ring_grps(bp, irq_re_init);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 69015fa50f20..365895ed3c3e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d)
3481 3481
3482 bcmgenet_netif_stop(dev); 3482 bcmgenet_netif_stop(dev);
3483 3483
3484 phy_suspend(priv->phydev); 3484 if (!device_may_wakeup(d))
3485 phy_suspend(priv->phydev);
3485 3486
3486 netif_device_detach(dev); 3487 netif_device_detach(dev);
3487 3488
@@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d)
3578 3579
3579 netif_device_attach(dev); 3580 netif_device_attach(dev);
3580 3581
3581 phy_resume(priv->phydev); 3582 if (!device_may_wakeup(d))
3583 phy_resume(priv->phydev);
3582 3584
3583 if (priv->eee.eee_enabled) 3585 if (priv->eee.eee_enabled)
3584 bcmgenet_eee_enable_set(dev, true); 3586 bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e87607621e62..2f9281936f0e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
220 udelay(60); 220 udelay(60);
221} 221}
222 222
223static void bcmgenet_internal_phy_setup(struct net_device *dev)
224{
225 struct bcmgenet_priv *priv = netdev_priv(dev);
226 u32 reg;
227
228 /* Power up PHY */
229 bcmgenet_phy_power_set(dev, true);
230 /* enable APD */
231 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
232 reg |= EXT_PWR_DN_EN_LD;
233 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
234 bcmgenet_mii_reset(dev);
235}
236
237static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) 223static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
238{ 224{
239 u32 reg; 225 u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
281 267
282 if (priv->internal_phy) { 268 if (priv->internal_phy) {
283 phy_name = "internal PHY"; 269 phy_name = "internal PHY";
284 bcmgenet_internal_phy_setup(dev);
285 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 270 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
286 phy_name = "MoCA"; 271 phy_name = "MoCA";
287 bcmgenet_moca_phy_setup(priv); 272 bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663a6ead..0f6811860ad5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
1930bfa_ioc_send_enable(struct bfa_ioc *ioc) 1930bfa_ioc_send_enable(struct bfa_ioc *ioc)
1931{ 1931{
1932 struct bfi_ioc_ctrl_req enable_req; 1932 struct bfi_ioc_ctrl_req enable_req;
1933 struct timeval tv;
1934 1933
1935 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1934 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1936 bfa_ioc_portid(ioc)); 1935 bfa_ioc_portid(ioc));
1937 enable_req.clscode = htons(ioc->clscode); 1936 enable_req.clscode = htons(ioc->clscode);
1938 do_gettimeofday(&tv); 1937 enable_req.rsvd = htons(0);
1939 enable_req.tv_sec = ntohl(tv.tv_sec); 1938 /* overflow in 2106 */
1939 enable_req.tv_sec = ntohl(ktime_get_real_seconds());
1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); 1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1941} 1941}
1942 1942
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
1947 1947
1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, 1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1949 bfa_ioc_portid(ioc)); 1949 bfa_ioc_portid(ioc));
1950 disable_req.clscode = htons(ioc->clscode);
1951 disable_req.rsvd = htons(0);
1952 /* overflow in 2106 */
1953 disable_req.tv_sec = ntohl(ktime_get_real_seconds());
1950 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); 1954 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1951} 1955}
1952 1956
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 05c1c1dd7751..cebfe3bd086e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
325 return PTR_ERR(kern_buf); 325 return PTR_ERR(kern_buf);
326 326
327 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 327 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
328 if (rc < 2) { 328 if (rc < 2 || len > UINT_MAX >> 2) {
329 netdev_warn(bnad->netdev, "failed to read user buffer\n"); 329 netdev_warn(bnad->netdev, "failed to read user buffer\n");
330 kfree(kern_buf); 330 kfree(kern_buf);
331 return -EINVAL; 331 return -EINVAL;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 64a1095e4d14..a0ca68ce3fbb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
134 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); 134 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
135 switch (sdevid) { 135 switch (sdevid) {
136 case PCI_SUBSYS_DEVID_81XX_BGX: 136 case PCI_SUBSYS_DEVID_81XX_BGX:
137 case PCI_SUBSYS_DEVID_81XX_RGX:
137 max_bgx_per_node = MAX_BGX_PER_CN81XX; 138 max_bgx_per_node = MAX_BGX_PER_CN81XX;
138 break; 139 break;
139 case PCI_SUBSYS_DEVID_83XX_BGX: 140 case PCI_SUBSYS_DEVID_83XX_BGX:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index c5080f2cead5..6b7fe6fdd13b 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -16,6 +16,7 @@
16/* Subsystem device IDs */ 16/* Subsystem device IDs */
17#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126 17#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
18#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226 18#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
19#define PCI_SUBSYS_DEVID_81XX_RGX 0xA254
19#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326 20#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
20 21
21#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */ 22#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e855004c57..02dd5246dfae 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4939,8 +4939,9 @@ static int
4939__be_cmd_set_logical_link_config(struct be_adapter *adapter, 4939__be_cmd_set_logical_link_config(struct be_adapter *adapter,
4940 int link_state, int version, u8 domain) 4940 int link_state, int version, u8 domain)
4941{ 4941{
4942 struct be_mcc_wrb *wrb;
4943 struct be_cmd_req_set_ll_link *req; 4942 struct be_cmd_req_set_ll_link *req;
4943 struct be_mcc_wrb *wrb;
4944 u32 link_config = 0;
4944 int status; 4945 int status;
4945 4946
4946 mutex_lock(&adapter->mcc_lock); 4947 mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4962 4963
4963 if (link_state == IFLA_VF_LINK_STATE_ENABLE || 4964 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4964 link_state == IFLA_VF_LINK_STATE_AUTO) 4965 link_state == IFLA_VF_LINK_STATE_AUTO)
4965 req->link_config |= PLINK_ENABLE; 4966 link_config |= PLINK_ENABLE;
4966 4967
4967 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4968 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4968 req->link_config |= PLINK_TRACK; 4969 link_config |= PLINK_TRACK;
4970
4971 req->link_config = cpu_to_le32(link_config);
4969 4972
4970 status = be_mcc_notify_wait(adapter); 4973 status = be_mcc_notify_wait(adapter);
4971err: 4974err:
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 992ebe973d25..f819843e2bae 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
189 189
190 nps_enet_tx_handler(ndev); 190 nps_enet_tx_handler(ndev);
191 work_done = nps_enet_rx_handler(ndev); 191 work_done = nps_enet_rx_handler(ndev);
192 if (work_done < budget) { 192 if ((work_done < budget) && napi_complete_done(napi, work_done)) {
193 u32 buf_int_enable_value = 0; 193 u32 buf_int_enable_value = 0;
194 194
195 napi_complete_done(napi, work_done);
196
197 /* set tx_done and rx_rdy bits */ 195 /* set tx_done and rx_rdy bits */
198 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; 196 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
199 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; 197 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 928b0df2b8e0..ade6b3e4ed13 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,8 +28,10 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/of.h>
31#include <linux/phy.h> 32#include <linux/phy.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/property.h>
33#include <net/ip.h> 35#include <net/ip.h>
34#include <net/ncsi.h> 36#include <net/ncsi.h>
35 37
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3239d27143b9..bdd8cdd732fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
82 else 82 else
83 *link_status = 0; 83 *link_status = 0;
84 84
85 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); 85 if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
86 if (!ret) 86 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
87 *link_status = *link_status && sfp_prsnt; 87 &sfp_prsnt);
88 if (!ret)
89 *link_status = *link_status && sfp_prsnt;
90 }
88 91
89 mac_cb->link = *link_status; 92 mac_cb->link = *link_status;
90} 93}
@@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
855 of_node_put(np); 858 of_node_put(np);
856 859
857 np = of_parse_phandle(to_of_node(mac_cb->fw_port), 860 np = of_parse_phandle(to_of_node(mac_cb->fw_port),
858 "serdes-syscon", 0); 861 "serdes-syscon", 0);
859 syscon = syscon_node_to_regmap(np); 862 syscon = syscon_node_to_regmap(np);
860 of_node_put(np); 863 of_node_put(np);
861 if (IS_ERR_OR_NULL(syscon)) { 864 if (IS_ERR_OR_NULL(syscon)) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 90dbda792614..403ea9db6dbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
1519 mac_key->high.bits.mac_3 = addr[3]; 1519 mac_key->high.bits.mac_3 = addr[3];
1520 mac_key->low.bits.mac_4 = addr[4]; 1520 mac_key->low.bits.mac_4 = addr[4];
1521 mac_key->low.bits.mac_5 = addr[5]; 1521 mac_key->low.bits.mac_5 = addr[5];
1522 mac_key->low.bits.port_vlan = 0;
1522 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, 1523 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
1523 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1524 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1524 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1525 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2924 /* find the tcam entry index for promisc */ 2925 /* find the tcam entry index for promisc */
2925 entry_index = dsaf_promisc_tcam_entry(port); 2926 entry_index = dsaf_promisc_tcam_entry(port);
2926 2927
2928 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2929 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2930
2927 /* config key mask */ 2931 /* config key mask */
2928 if (enable) { 2932 if (enable) {
2929 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2930 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2931 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, 2933 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
2932 DSAF_TBL_TCAM_KEY_PORT_M, 2934 DSAF_TBL_TCAM_KEY_PORT_M,
2933 DSAF_TBL_TCAM_KEY_PORT_S, port); 2935 DSAF_TBL_TCAM_KEY_PORT_S, port);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a2c22d084ce9..e13aa064a8e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
461 return 0; 461 return 0;
462} 462}
463 463
464int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
465{
466 union acpi_object *obj;
467 union acpi_object obj_args, argv4;
468
469 obj_args.integer.type = ACPI_TYPE_INTEGER;
470 obj_args.integer.value = mac_cb->mac_id;
471
472 argv4.type = ACPI_TYPE_PACKAGE,
473 argv4.package.count = 1,
474 argv4.package.elements = &obj_args,
475
476 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
477 hns_dsaf_acpi_dsm_uuid, 0,
478 HNS_OP_GET_SFP_STAT_FUNC, &argv4);
479
480 if (!obj || obj->type != ACPI_TYPE_INTEGER)
481 return -ENODEV;
482
483 *sfp_prsnt = obj->integer.value;
484
485 ACPI_FREE(obj);
486
487 return 0;
488}
489
464/** 490/**
465 * hns_mac_config_sds_loopback - set loop back for serdes 491 * hns_mac_config_sds_loopback - set loop back for serdes
466 * @mac_cb: mac control block 492 * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
592 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; 618 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
593 619
594 misc_op->get_phy_if = hns_mac_get_phy_if_acpi; 620 misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
595 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; 621 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
596 622
597 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; 623 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
598 } else { 624 } else {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5f11b4dc95d2..b23d6545f835 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1257 release_sub_crq_queue(adapter, 1257 release_sub_crq_queue(adapter,
1258 adapter->tx_scrq[i]); 1258 adapter->tx_scrq[i]);
1259 } 1259 }
1260 kfree(adapter->tx_scrq);
1260 adapter->tx_scrq = NULL; 1261 adapter->tx_scrq = NULL;
1261 } 1262 }
1262 1263
@@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1269 release_sub_crq_queue(adapter, 1270 release_sub_crq_queue(adapter,
1270 adapter->rx_scrq[i]); 1271 adapter->rx_scrq[i]);
1271 } 1272 }
1273 kfree(adapter->rx_scrq);
1272 adapter->rx_scrq = NULL; 1274 adapter->rx_scrq = NULL;
1273 } 1275 }
1274} 1276}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2175cced402f..e9af89ad039c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
6274 /* Quiesce the device without resetting the hardware */ 6274 /* Quiesce the device without resetting the hardware */
6275 e1000e_down(adapter, false); 6275 e1000e_down(adapter, false);
6276 e1000_free_irq(adapter); 6276 e1000_free_irq(adapter);
6277 e1000e_reset_interrupt_capability(adapter);
6278 } 6277 }
6278 e1000e_reset_interrupt_capability(adapter);
6279 6279
6280 /* Allow time for pending master requests to run */ 6280 /* Allow time for pending master requests to run */
6281 e1000e_disable_pcie_master(&adapter->hw); 6281 e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..82a95cc2c8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4438 if (!vsi->netdev) 4438 if (!vsi->netdev)
4439 return; 4439 return;
4440 4440
4441 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4441 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4442 napi_enable(&vsi->q_vectors[q_idx]->napi); 4442 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4443
4444 if (q_vector->rx.ring || q_vector->tx.ring)
4445 napi_enable(&q_vector->napi);
4446 }
4443} 4447}
4444 4448
4445/** 4449/**
@@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4453 if (!vsi->netdev) 4457 if (!vsi->netdev)
4454 return; 4458 return;
4455 4459
4456 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4460 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4457 napi_disable(&vsi->q_vectors[q_idx]->napi); 4461 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4462
4463 if (q_vector->rx.ring || q_vector->tx.ring)
4464 napi_disable(&q_vector->napi);
4465 }
4458} 4466}
4459 4467
4460/** 4468/**
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9e757684816d..93949139e62c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
613 struct mtk_mac *mac = netdev_priv(dev); 613 struct mtk_mac *mac = netdev_priv(dev);
614 struct mtk_eth *eth = mac->hw; 614 struct mtk_eth *eth = mac->hw;
615 struct mtk_tx_dma *itxd, *txd; 615 struct mtk_tx_dma *itxd, *txd;
616 struct mtk_tx_buf *tx_buf; 616 struct mtk_tx_buf *itx_buf, *tx_buf;
617 dma_addr_t mapped_addr; 617 dma_addr_t mapped_addr;
618 unsigned int nr_frags; 618 unsigned int nr_frags;
619 int i, n_desc = 1; 619 int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
627 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; 627 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
628 txd4 |= fport; 628 txd4 |= fport;
629 629
630 tx_buf = mtk_desc_to_tx_buf(ring, itxd); 630 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
631 memset(tx_buf, 0, sizeof(*tx_buf)); 631 memset(itx_buf, 0, sizeof(*itx_buf));
632 632
633 if (gso) 633 if (gso)
634 txd4 |= TX_DMA_TSO; 634 txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
647 return -ENOMEM; 647 return -ENOMEM;
648 648
649 WRITE_ONCE(itxd->txd1, mapped_addr); 649 WRITE_ONCE(itxd->txd1, mapped_addr);
650 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; 650 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
651 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); 651 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
652 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); 652 MTK_TX_FLAGS_FPORT1;
653 dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
654 dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
653 655
654 /* TX SG offload */ 656 /* TX SG offload */
655 txd = itxd; 657 txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
685 last_frag * TX_DMA_LS0)); 687 last_frag * TX_DMA_LS0));
686 WRITE_ONCE(txd->txd4, fport); 688 WRITE_ONCE(txd->txd4, fport);
687 689
688 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
689 tx_buf = mtk_desc_to_tx_buf(ring, txd); 690 tx_buf = mtk_desc_to_tx_buf(ring, txd);
690 memset(tx_buf, 0, sizeof(*tx_buf)); 691 memset(tx_buf, 0, sizeof(*tx_buf));
691 692 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
692 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; 693 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
694 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
695 MTK_TX_FLAGS_FPORT1;
696
693 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); 697 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
694 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); 698 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
695 frag_size -= frag_map_size; 699 frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
698 } 702 }
699 703
700 /* store skb to cleanup */ 704 /* store skb to cleanup */
701 tx_buf->skb = skb; 705 itx_buf->skb = skb;
702 706
703 WRITE_ONCE(itxd->txd4, txd4); 707 WRITE_ONCE(itxd->txd4, txd4);
704 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | 708 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1012 1016
1013 while ((cpu != dma) && budget) { 1017 while ((cpu != dma) && budget) {
1014 u32 next_cpu = desc->txd2; 1018 u32 next_cpu = desc->txd2;
1015 int mac; 1019 int mac = 0;
1016 1020
1017 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); 1021 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1018 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) 1022 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1019 break; 1023 break;
1020 1024
1021 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1022 TX_DMA_FPORT_MASK;
1023 mac--;
1024
1025 tx_buf = mtk_desc_to_tx_buf(ring, desc); 1025 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1026 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1027 mac = 1;
1028
1026 skb = tx_buf->skb; 1029 skb = tx_buf->skb;
1027 if (!skb) { 1030 if (!skb) {
1028 condition = 1; 1031 condition = 1;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 99b1c8e9f16f..08285a96ff70 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -406,12 +406,18 @@ struct mtk_hw_stats {
406 struct u64_stats_sync syncp; 406 struct u64_stats_sync syncp;
407}; 407};
408 408
409/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
410 * memory was allocated so that it can be freed properly
411 */
412enum mtk_tx_flags { 409enum mtk_tx_flags {
410 /* PDMA descriptor can point at 1-2 segments. This enum allows us to
411 * track how memory was allocated so that it can be freed properly.
412 */
413 MTK_TX_FLAGS_SINGLE0 = 0x01, 413 MTK_TX_FLAGS_SINGLE0 = 0x01,
414 MTK_TX_FLAGS_PAGE0 = 0x02, 414 MTK_TX_FLAGS_PAGE0 = 0x02,
415
416 /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
417 * SKB out instead of looking up through hardware TX descriptor.
418 */
419 MTK_TX_FLAGS_FPORT0 = 0x04,
420 MTK_TX_FLAGS_FPORT1 = 0x08,
415}; 421};
416 422
417/* This enum allows us to identify how the clock is defined on the array of the 423/* This enum allows us to identify how the clock is defined on the array of the
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e8c105164931..0e0fa7030565 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
2305 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); 2305 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2306 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { 2306 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2307 /* PCI might be offline */ 2307 /* PCI might be offline */
2308
2309 /* If device removal has been requested,
2310 * do not continue retrying.
2311 */
2312 if (dev->persist->interface_state &
2313 MLX4_INTERFACE_STATE_NOWAIT) {
2314 mlx4_warn(dev,
2315 "communication channel is offline\n");
2316 return -EIO;
2317 }
2318
2308 msleep(100); 2319 msleep(100);
2309 wr_toggle = swab32(readl(&priv->mfunc.comm-> 2320 wr_toggle = swab32(readl(&priv->mfunc.comm->
2310 slave_write)); 2321 slave_write));
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 21377c315083..703205475524 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1941 if (!offline_bit) 1941 if (!offline_bit)
1942 return 0; 1942 return 0;
1943
1944 /* If device removal has been requested,
1945 * do not continue retrying.
1946 */
1947 if (dev->persist->interface_state &
1948 MLX4_INTERFACE_STATE_NOWAIT)
1949 break;
1950
1943 /* There are cases as part of AER/Reset flow that PF needs 1951 /* There are cases as part of AER/Reset flow that PF needs
1944 * around 100 msec to load. We therefore sleep for 100 msec 1952 * around 100 msec to load. We therefore sleep for 100 msec
1945 * to allow other tasks to make use of that CPU during this 1953 * to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
3955 struct devlink *devlink = priv_to_devlink(priv); 3963 struct devlink *devlink = priv_to_devlink(priv);
3956 int active_vfs = 0; 3964 int active_vfs = 0;
3957 3965
3966 if (mlx4_is_slave(dev))
3967 persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
3968
3958 mutex_lock(&persist->interface_state_mutex); 3969 mutex_lock(&persist->interface_state_mutex);
3959 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3970 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3960 mutex_unlock(&persist->interface_state_mutex); 3971 mutex_unlock(&persist->interface_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index caa837e5e2b9..a380353a78c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
361 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 361 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
362 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 362 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
363 case MLX5_CMD_OP_QUERY_Q_COUNTER: 363 case MLX5_CMD_OP_QUERY_Q_COUNTER:
364 case MLX5_CMD_OP_SET_RATE_LIMIT:
365 case MLX5_CMD_OP_QUERY_RATE_LIMIT:
364 case MLX5_CMD_OP_ALLOC_PD: 366 case MLX5_CMD_OP_ALLOC_PD:
365 case MLX5_CMD_OP_ALLOC_UAR: 367 case MLX5_CMD_OP_ALLOC_UAR:
366 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 368 case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
497 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 499 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
498 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 500 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
499 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 501 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
502 MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
503 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
500 MLX5_COMMAND_STR_CASE(ALLOC_PD); 504 MLX5_COMMAND_STR_CASE(ALLOC_PD);
501 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 505 MLX5_COMMAND_STR_CASE(DEALLOC_PD);
502 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 506 MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f6a6ded204f6..dc52053128bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
928int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 928int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
929void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 929void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
930u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); 930u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
931void mlx5e_add_vxlan_port(struct net_device *netdev,
932 struct udp_tunnel_info *ti);
933void mlx5e_del_vxlan_port(struct net_device *netdev,
934 struct udp_tunnel_info *ti);
935 931
936int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, 932int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
937 void *sp); 933 void *sp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8ef64c4db2c2..66c133757a5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
3100 vf_stats); 3100 vf_stats);
3101} 3101}
3102 3102
3103void mlx5e_add_vxlan_port(struct net_device *netdev, 3103static void mlx5e_add_vxlan_port(struct net_device *netdev,
3104 struct udp_tunnel_info *ti) 3104 struct udp_tunnel_info *ti)
3105{ 3105{
3106 struct mlx5e_priv *priv = netdev_priv(netdev); 3106 struct mlx5e_priv *priv = netdev_priv(netdev);
3107 3107
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
3114 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); 3114 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3115} 3115}
3116 3116
3117void mlx5e_del_vxlan_port(struct net_device *netdev, 3117static void mlx5e_del_vxlan_port(struct net_device *netdev,
3118 struct udp_tunnel_info *ti) 3118 struct udp_tunnel_info *ti)
3119{ 3119{
3120 struct mlx5e_priv *priv = netdev_priv(netdev); 3120 struct mlx5e_priv *priv = netdev_priv(netdev);
3121 3121
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c864574a9d5..f621373bd7a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
393 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, 393 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
394 .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, 394 .ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
395 .ndo_get_stats64 = mlx5e_rep_get_stats, 395 .ndo_get_stats64 = mlx5e_rep_get_stats,
396 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
397 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
398 .ndo_has_offload_stats = mlx5e_has_offload_stats, 396 .ndo_has_offload_stats = mlx5e_has_offload_stats,
399 .ndo_get_offload_stats = mlx5e_get_offload_stats, 397 .ndo_get_offload_stats = mlx5e_get_offload_stats,
400}; 398};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3d371688fbbb..bafcb349a50c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
601 if (lro_num_seg > 1) { 601 if (lro_num_seg > 1) {
602 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); 602 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
603 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); 603 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
604 /* Subtract one since we already counted this as one
605 * "regular" packet in mlx5e_complete_rx_cqe()
606 */
607 rq->stats.packets += lro_num_seg - 1;
604 rq->stats.lro_packets++; 608 rq->stats.lro_packets++;
605 rq->stats.lro_bytes += cqe_bcnt; 609 rq->stats.lro_bytes += cqe_bcnt;
606 } 610 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 79481f4cf264..fade7233dac5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -133,6 +133,23 @@ err_create_ft:
133 return rule; 133 return rule;
134} 134}
135 135
136static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
137 struct mlx5e_tc_flow *flow)
138{
139 struct mlx5_fc *counter = NULL;
140
141 if (!IS_ERR(flow->rule)) {
142 counter = mlx5_flow_rule_counter(flow->rule);
143 mlx5_del_flow_rules(flow->rule);
144 mlx5_fc_destroy(priv->mdev, counter);
145 }
146
147 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
148 mlx5_destroy_flow_table(priv->fs.tc.t);
149 priv->fs.tc.t = NULL;
150 }
151}
152
136static struct mlx5_flow_handle * 153static struct mlx5_flow_handle *
137mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 154mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
138 struct mlx5_flow_spec *spec, 155 struct mlx5_flow_spec *spec,
@@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
149} 166}
150 167
151static void mlx5e_detach_encap(struct mlx5e_priv *priv, 168static void mlx5e_detach_encap(struct mlx5e_priv *priv,
152 struct mlx5e_tc_flow *flow) { 169 struct mlx5e_tc_flow *flow);
170
171static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
172 struct mlx5e_tc_flow *flow)
173{
174 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
175
176 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
177
178 mlx5_eswitch_del_vlan_action(esw, flow->attr);
179
180 if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
181 mlx5e_detach_encap(priv, flow);
182}
183
184static void mlx5e_detach_encap(struct mlx5e_priv *priv,
185 struct mlx5e_tc_flow *flow)
186{
153 struct list_head *next = flow->encap.next; 187 struct list_head *next = flow->encap.next;
154 188
155 list_del(&flow->encap); 189 list_del(&flow->encap);
@@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
173static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 207static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
174 struct mlx5e_tc_flow *flow) 208 struct mlx5e_tc_flow *flow)
175{ 209{
176 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 210 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
177 struct mlx5_fc *counter = NULL; 211 mlx5e_tc_del_fdb_flow(priv, flow);
178 212 else
179 if (!IS_ERR(flow->rule)) { 213 mlx5e_tc_del_nic_flow(priv, flow);
180 counter = mlx5_flow_rule_counter(flow->rule);
181 mlx5_del_flow_rules(flow->rule);
182 mlx5_fc_destroy(priv->mdev, counter);
183 }
184
185 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
186 mlx5_eswitch_del_vlan_action(esw, flow->attr);
187 if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
188 mlx5e_detach_encap(priv, flow);
189 }
190
191 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
192 mlx5_destroy_flow_table(priv->fs.tc.t);
193 priv->fs.tc.t = NULL;
194 }
195} 214}
196 215
197static void parse_vxlan_attr(struct mlx5_flow_spec *spec, 216static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
248 skb_flow_dissector_target(f->dissector, 267 skb_flow_dissector_target(f->dissector,
249 FLOW_DISSECTOR_KEY_ENC_PORTS, 268 FLOW_DISSECTOR_KEY_ENC_PORTS,
250 f->mask); 269 f->mask);
270 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
271 struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
272 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
251 273
252 /* Full udp dst port must be given */ 274 /* Full udp dst port must be given */
253 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 275 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
254 goto vxlan_match_offload_err; 276 goto vxlan_match_offload_err;
255 277
256 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && 278 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
257 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 279 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
258 parse_vxlan_attr(spec, f); 280 parse_vxlan_attr(spec, f);
259 else { 281 else {
@@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
976 struct mlx5_esw_flow_attr *attr) 998 struct mlx5_esw_flow_attr *attr)
977{ 999{
978 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1000 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1001 struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
1002 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
979 unsigned short family = ip_tunnel_info_af(tun_info); 1003 unsigned short family = ip_tunnel_info_af(tun_info);
980 struct ip_tunnel_key *key = &tun_info->key; 1004 struct ip_tunnel_key *key = &tun_info->key;
981 struct mlx5_encap_entry *e; 1005 struct mlx5_encap_entry *e;
@@ -996,7 +1020,7 @@ vxlan_encap_offload_err:
996 return -EOPNOTSUPP; 1020 return -EOPNOTSUPP;
997 } 1021 }
998 1022
999 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && 1023 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
1000 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { 1024 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
1001 tunnel_type = MLX5_HEADER_TYPE_VXLAN; 1025 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
1002 } else { 1026 } else {
@@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1112 } 1136 }
1113 1137
1114 if (is_tcf_vlan(a)) { 1138 if (is_tcf_vlan(a)) {
1115 if (tcf_vlan_action(a) == VLAN_F_POP) { 1139 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
1116 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 1140 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
1117 } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { 1141 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
1118 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) 1142 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
1119 return -EOPNOTSUPP; 1143 return -EOPNOTSUPP;
1120 1144
1121 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 1145 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1122 attr->vlan = tcf_vlan_push_vid(a); 1146 attr->vlan = tcf_vlan_push_vid(a);
1147 } else { /* action is TCA_VLAN_ACT_MODIFY */
1148 return -EOPNOTSUPP;
1123 } 1149 }
1124 continue; 1150 continue;
1125 } 1151 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f193128bac4b..57f5e2d7ebd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
274 sq->stats.tso_bytes += skb->len - ihs; 274 sq->stats.tso_bytes += skb->len - ihs;
275 } 275 }
276 276
277 sq->stats.packets += skb_shinfo(skb)->gso_segs;
277 num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; 278 num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
278 } else { 279 } else {
279 bf = sq->bf_budget && 280 bf = sq->bf_budget &&
280 !skb->xmit_more && 281 !skb->xmit_more &&
281 !skb_shinfo(skb)->nr_frags; 282 !skb_shinfo(skb)->nr_frags;
282 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); 283 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
284 sq->stats.packets++;
283 num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 285 num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
284 } 286 }
285 287
288 sq->stats.bytes += num_bytes;
286 wi->num_bytes = num_bytes; 289 wi->num_bytes = num_bytes;
287 290
288 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; 291 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
381 if (bf) 384 if (bf)
382 sq->bf_budget--; 385 sq->bf_budget--;
383 386
384 sq->stats.packets++;
385 sq->stats.bytes += num_bytes;
386 return NETDEV_TX_OK; 387 return NETDEV_TX_OK;
387 388
388dma_unmap_wqe_err: 389dma_unmap_wqe_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5b78883d5654..ad329b1680b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
209 struct mlx5_eswitch_rep *vport_reps; 209 struct mlx5_eswitch_rep *vport_reps;
210 DECLARE_HASHTABLE(encap_tbl, 8); 210 DECLARE_HASHTABLE(encap_tbl, 8);
211 u8 inline_mode; 211 u8 inline_mode;
212 u64 num_flows;
212}; 213};
213 214
214struct mlx5_eswitch { 215struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
271mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, 272mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
272 struct mlx5_flow_spec *spec, 273 struct mlx5_flow_spec *spec,
273 struct mlx5_esw_flow_attr *attr); 274 struct mlx5_esw_flow_attr *attr);
275void
276mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
277 struct mlx5_flow_handle *rule,
278 struct mlx5_esw_flow_attr *attr);
279
274struct mlx5_flow_handle * 280struct mlx5_flow_handle *
275mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); 281mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
276 282
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4f5b0d47d5f3..307ec6c5fd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
93 spec, &flow_act, dest, i); 93 spec, &flow_act, dest, i);
94 if (IS_ERR(rule)) 94 if (IS_ERR(rule))
95 mlx5_fc_destroy(esw->dev, counter); 95 mlx5_fc_destroy(esw->dev, counter);
96 else
97 esw->offloads.num_flows++;
96 98
97 return rule; 99 return rule;
98} 100}
99 101
102void
103mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
104 struct mlx5_flow_handle *rule,
105 struct mlx5_esw_flow_attr *attr)
106{
107 struct mlx5_fc *counter = NULL;
108
109 if (!IS_ERR(rule)) {
110 counter = mlx5_flow_rule_counter(rule);
111 mlx5_del_flow_rules(rule);
112 mlx5_fc_destroy(esw->dev, counter);
113 esw->offloads.num_flows--;
114 }
115}
116
100static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) 117static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
101{ 118{
102 struct mlx5_eswitch_rep *rep; 119 struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
908 MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 925 MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
909 return -EOPNOTSUPP; 926 return -EOPNOTSUPP;
910 927
928 if (esw->offloads.num_flows > 0) {
929 esw_warn(dev, "Can't set inline mode when flows are configured\n");
930 return -EOPNOTSUPP;
931 }
932
911 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 933 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
912 if (err) 934 if (err)
913 goto out; 935 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 55957246c0e8..b5d5519542e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
294 struct netdev_notifier_changeupper_info *info) 294 struct netdev_notifier_changeupper_info *info)
295{ 295{
296 struct net_device *upper = info->upper_dev, *ndev_tmp; 296 struct net_device *upper = info->upper_dev, *ndev_tmp;
297 struct netdev_lag_upper_info *lag_upper_info; 297 struct netdev_lag_upper_info *lag_upper_info = NULL;
298 bool is_bonded; 298 bool is_bonded;
299 int bond_status = 0; 299 int bond_status = 0;
300 int num_slaves = 0; 300 int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
303 if (!netif_is_lag_master(upper)) 303 if (!netif_is_lag_master(upper))
304 return 0; 304 return 0;
305 305
306 lag_upper_info = info->upper_info; 306 if (info->linking)
307 lag_upper_info = info->upper_info;
307 308
308 /* The event may still be of interest if the slave does not belong to 309 /* The event may still be of interest if the slave does not belong to
309 * us, but is enslaved to a master which has one or more of our netdevs 310 * us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e2bd600d19de..60154a175bd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
87 [2] = { 87 [2] = {
88 .mask = MLX5_PROF_MASK_QP_SIZE | 88 .mask = MLX5_PROF_MASK_QP_SIZE |
89 MLX5_PROF_MASK_MR_CACHE, 89 MLX5_PROF_MASK_MR_CACHE,
90 .log_max_qp = 17, 90 .log_max_qp = 18,
91 .mr_cache[0] = { 91 .mr_cache[0] = {
92 .size = 500, 92 .size = 500,
93 .limit = 250 93 .limit = 250
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 06c9f4100cb9..6ad44be08b33 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
25#include <linux/of_irq.h> 25#include <linux/of_irq.h>
26#include <linux/crc32.h> 26#include <linux/crc32.h>
27#include <linux/crc32c.h> 27#include <linux/crc32c.h>
28#include <linux/circ_buf.h>
28 29
29#include "moxart_ether.h" 30#include "moxart_ether.h"
30 31
@@ -278,6 +279,13 @@ rx_next:
278 return rx; 279 return rx;
279} 280}
280 281
282static int moxart_tx_queue_space(struct net_device *ndev)
283{
284 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
285
286 return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
287}
288
281static void moxart_tx_finished(struct net_device *ndev) 289static void moxart_tx_finished(struct net_device *ndev)
282{ 290{
283 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 291 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
297 tx_tail = TX_NEXT(tx_tail); 305 tx_tail = TX_NEXT(tx_tail);
298 } 306 }
299 priv->tx_tail = tx_tail; 307 priv->tx_tail = tx_tail;
308 if (netif_queue_stopped(ndev) &&
309 moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
310 netif_wake_queue(ndev);
300} 311}
301 312
302static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) 313static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
324 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 335 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
325 void *desc; 336 void *desc;
326 unsigned int len; 337 unsigned int len;
327 unsigned int tx_head = priv->tx_head; 338 unsigned int tx_head;
328 u32 txdes1; 339 u32 txdes1;
329 int ret = NETDEV_TX_BUSY; 340 int ret = NETDEV_TX_BUSY;
330 341
342 spin_lock_irq(&priv->txlock);
343
344 tx_head = priv->tx_head;
331 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); 345 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
332 346
333 spin_lock_irq(&priv->txlock); 347 if (moxart_tx_queue_space(ndev) == 1)
348 netif_stop_queue(ndev);
349
334 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { 350 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
335 net_dbg_ratelimited("no TX space for packet\n"); 351 net_dbg_ratelimited("no TX space for packet\n");
336 priv->stats.tx_dropped++; 352 priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563ac7c6..afc32ec998c0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) 59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
60#define TX_BUF_SIZE 1600 60#define TX_BUF_SIZE 1600
61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) 61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
62#define TX_WAKE_THRESHOLD 16
62 63
63#define RX_DESC_NUM 64 64#define RX_DESC_NUM 64
64#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) 65#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..a41377e26c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
3275{ 3275{
3276 struct nfp_net *nn = netdev_priv(netdev); 3276 struct nfp_net *nn = netdev_priv(netdev);
3277 3277
3278 unregister_netdev(nn->netdev);
3279
3278 if (nn->xdp_prog) 3280 if (nn->xdp_prog)
3279 bpf_prog_put(nn->xdp_prog); 3281 bpf_prog_put(nn->xdp_prog);
3280 if (nn->bpf_offload_xdp) 3282 if (nn->bpf_offload_xdp)
3281 nfp_net_xdp_offload(nn, NULL); 3283 nfp_net_xdp_offload(nn, NULL);
3282 unregister_netdev(nn->netdev);
3283} 3284}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 5bd36a4a8fcd..a6e2bbe629bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
583 p_params->ets_cbs, 583 p_params->ets_cbs,
584 p_ets->pri_tc_tbl[0], p_params->max_ets_tc); 584 p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
585 585
586 if (p_params->ets_enabled && !p_params->max_ets_tc) {
587 p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
588 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
589 "ETS params: max_ets_tc is forced to %d\n",
590 p_params->max_ets_tc);
591 }
592
586 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are 593 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
587 * encoded in a type u32 array of size 2. 594 * encoded in a type u32 array of size 2.
588 */ 595 */
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
1001 u8 pfc_map = 0; 1008 u8 pfc_map = 0;
1002 int i; 1009 int i;
1003 1010
1011 *pfc &= ~DCBX_PFC_ERROR_MASK;
1012
1004 if (p_params->pfc.willing) 1013 if (p_params->pfc.willing)
1005 *pfc |= DCBX_PFC_WILLING_MASK; 1014 *pfc |= DCBX_PFC_WILLING_MASK;
1006 else 1015 else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1255{ 1264{
1256 struct qed_dcbx_get *dcbx_info; 1265 struct qed_dcbx_get *dcbx_info;
1257 1266
1258 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); 1267 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
1259 if (!dcbx_info) 1268 if (!dcbx_info)
1260 return NULL; 1269 return NULL;
1261 1270
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
2073 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) 2082 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
2074 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); 2083 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
2075 2084
2085 dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
2086
2076 ptt = qed_ptt_acquire(hwfn); 2087 ptt = qed_ptt_acquire(hwfn);
2077 if (!ptt) 2088 if (!ptt)
2078 return -EINVAL; 2089 return -EINVAL;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 54248775f227..f68c4db656ed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
1127 .get_mdio_data = sh_get_mdio, 1127 .get_mdio_data = sh_get_mdio,
1128}; 1128};
1129 1129
1130/* free Tx skb function */
1131static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1132{
1133 struct sh_eth_private *mdp = netdev_priv(ndev);
1134 struct sh_eth_txdesc *txdesc;
1135 int free_num = 0;
1136 int entry;
1137 bool sent;
1138
1139 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1140 entry = mdp->dirty_tx % mdp->num_tx_ring;
1141 txdesc = &mdp->tx_ring[entry];
1142 sent = !(txdesc->status & cpu_to_le32(TD_TACT));
1143 if (sent_only && !sent)
1144 break;
1145 /* TACT bit must be checked before all the following reads */
1146 dma_rmb();
1147 netif_info(mdp, tx_done, ndev,
1148 "tx entry %d status 0x%08x\n",
1149 entry, le32_to_cpu(txdesc->status));
1150 /* Free the original skb. */
1151 if (mdp->tx_skbuff[entry]) {
1152 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
1153 le32_to_cpu(txdesc->len) >> 16,
1154 DMA_TO_DEVICE);
1155 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1156 mdp->tx_skbuff[entry] = NULL;
1157 free_num++;
1158 }
1159 txdesc->status = cpu_to_le32(TD_TFP);
1160 if (entry >= mdp->num_tx_ring - 1)
1161 txdesc->status |= cpu_to_le32(TD_TDLE);
1162
1163 if (sent) {
1164 ndev->stats.tx_packets++;
1165 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1166 }
1167 }
1168 return free_num;
1169}
1170
1130/* free skb and descriptor buffer */ 1171/* free skb and descriptor buffer */
1131static void sh_eth_ring_free(struct net_device *ndev) 1172static void sh_eth_ring_free(struct net_device *ndev)
1132{ 1173{
1133 struct sh_eth_private *mdp = netdev_priv(ndev); 1174 struct sh_eth_private *mdp = netdev_priv(ndev);
1134 int ringsize, i; 1175 int ringsize, i;
1135 1176
1177 if (mdp->rx_ring) {
1178 for (i = 0; i < mdp->num_rx_ring; i++) {
1179 if (mdp->rx_skbuff[i]) {
1180 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1181
1182 dma_unmap_single(&ndev->dev,
1183 le32_to_cpu(rxdesc->addr),
1184 ALIGN(mdp->rx_buf_sz, 32),
1185 DMA_FROM_DEVICE);
1186 }
1187 }
1188 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1189 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1190 mdp->rx_desc_dma);
1191 mdp->rx_ring = NULL;
1192 }
1193
1136 /* Free Rx skb ringbuffer */ 1194 /* Free Rx skb ringbuffer */
1137 if (mdp->rx_skbuff) { 1195 if (mdp->rx_skbuff) {
1138 for (i = 0; i < mdp->num_rx_ring; i++) 1196 for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
1141 kfree(mdp->rx_skbuff); 1199 kfree(mdp->rx_skbuff);
1142 mdp->rx_skbuff = NULL; 1200 mdp->rx_skbuff = NULL;
1143 1201
1144 /* Free Tx skb ringbuffer */
1145 if (mdp->tx_skbuff) {
1146 for (i = 0; i < mdp->num_tx_ring; i++)
1147 dev_kfree_skb(mdp->tx_skbuff[i]);
1148 }
1149 kfree(mdp->tx_skbuff);
1150 mdp->tx_skbuff = NULL;
1151
1152 if (mdp->rx_ring) {
1153 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1154 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1155 mdp->rx_desc_dma);
1156 mdp->rx_ring = NULL;
1157 }
1158
1159 if (mdp->tx_ring) { 1202 if (mdp->tx_ring) {
1203 sh_eth_tx_free(ndev, false);
1204
1160 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1205 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1161 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1206 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1162 mdp->tx_desc_dma); 1207 mdp->tx_desc_dma);
1163 mdp->tx_ring = NULL; 1208 mdp->tx_ring = NULL;
1164 } 1209 }
1210
1211 /* Free Tx skb ringbuffer */
1212 kfree(mdp->tx_skbuff);
1213 mdp->tx_skbuff = NULL;
1165} 1214}
1166 1215
1167/* format skb and descriptor buffer */ 1216/* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1409 update_mac_address(ndev); 1458 update_mac_address(ndev);
1410} 1459}
1411 1460
1412/* free Tx skb function */
1413static int sh_eth_txfree(struct net_device *ndev)
1414{
1415 struct sh_eth_private *mdp = netdev_priv(ndev);
1416 struct sh_eth_txdesc *txdesc;
1417 int free_num = 0;
1418 int entry;
1419
1420 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1421 entry = mdp->dirty_tx % mdp->num_tx_ring;
1422 txdesc = &mdp->tx_ring[entry];
1423 if (txdesc->status & cpu_to_le32(TD_TACT))
1424 break;
1425 /* TACT bit must be checked before all the following reads */
1426 dma_rmb();
1427 netif_info(mdp, tx_done, ndev,
1428 "tx entry %d status 0x%08x\n",
1429 entry, le32_to_cpu(txdesc->status));
1430 /* Free the original skb. */
1431 if (mdp->tx_skbuff[entry]) {
1432 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
1433 le32_to_cpu(txdesc->len) >> 16,
1434 DMA_TO_DEVICE);
1435 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1436 mdp->tx_skbuff[entry] = NULL;
1437 free_num++;
1438 }
1439 txdesc->status = cpu_to_le32(TD_TFP);
1440 if (entry >= mdp->num_tx_ring - 1)
1441 txdesc->status |= cpu_to_le32(TD_TDLE);
1442
1443 ndev->stats.tx_packets++;
1444 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1445 }
1446 return free_num;
1447}
1448
1449/* Packet receive function */ 1461/* Packet receive function */
1450static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) 1462static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1451{ 1463{
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1690 intr_status, mdp->cur_tx, mdp->dirty_tx, 1702 intr_status, mdp->cur_tx, mdp->dirty_tx,
1691 (u32)ndev->state, edtrr); 1703 (u32)ndev->state, edtrr);
1692 /* dirty buffer free */ 1704 /* dirty buffer free */
1693 sh_eth_txfree(ndev); 1705 sh_eth_tx_free(ndev, true);
1694 1706
1695 /* SH7712 BUG */ 1707 /* SH7712 BUG */
1696 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1708 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1751 /* Clear Tx interrupts */ 1763 /* Clear Tx interrupts */
1752 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); 1764 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1753 1765
1754 sh_eth_txfree(ndev); 1766 sh_eth_tx_free(ndev, true);
1755 netif_wake_queue(ndev); 1767 netif_wake_queue(ndev);
1756 } 1768 }
1757 1769
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2412 2424
2413 spin_lock_irqsave(&mdp->lock, flags); 2425 spin_lock_irqsave(&mdp->lock, flags);
2414 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2426 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2415 if (!sh_eth_txfree(ndev)) { 2427 if (!sh_eth_tx_free(ndev, true)) {
2416 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); 2428 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2417 netif_stop_queue(ndev); 2429 netif_stop_queue(ndev);
2418 spin_unlock_irqrestore(&mdp->lock, flags); 2430 spin_unlock_irqrestore(&mdp->lock, flags);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7cd76b6b5cb9..2ae852454780 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
2216{ 2216{
2217 bool want[OFDPA_CTRL_MAX] = { 0, }; 2217 bool want[OFDPA_CTRL_MAX] = { 0, };
2218 bool prev_ctrls[OFDPA_CTRL_MAX]; 2218 bool prev_ctrls[OFDPA_CTRL_MAX];
2219 u8 uninitialized_var(prev_state); 2219 u8 prev_state;
2220 int err; 2220 int err;
2221 int i; 2221 int i;
2222 2222
2223 if (switchdev_trans_ph_prepare(trans)) { 2223 prev_state = ofdpa_port->stp_state;
2224 memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); 2224 if (prev_state == state)
2225 prev_state = ofdpa_port->stp_state;
2226 }
2227
2228 if (ofdpa_port->stp_state == state)
2229 return 0; 2225 return 0;
2230 2226
2227 memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
2231 ofdpa_port->stp_state = state; 2228 ofdpa_port->stp_state = state;
2232 2229
2233 switch (state) { 2230 switch (state) {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 334bcc6df6b2..b9cb697b2818 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1371 free_cpumask_var(thread_mask); 1371 free_cpumask_var(thread_mask);
1372 } 1372 }
1373 1373
1374 if (count > EFX_MAX_RX_QUEUES) {
1375 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1376 "Reducing number of rx queues from %u to %u.\n",
1377 count, EFX_MAX_RX_QUEUES);
1378 count = EFX_MAX_RX_QUEUES;
1379 }
1380
1374 /* If RSS is requested for the PF *and* VFs then we can't write RSS 1381 /* If RSS is requested for the PF *and* VFs then we can't write RSS
1375 * table entries that are inaccessible to VFs 1382 * table entries that are inaccessible to VFs
1376 */ 1383 */
@@ -2404,7 +2411,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t
2404 tnl.type = (u16)efx_tunnel_type; 2411 tnl.type = (u16)efx_tunnel_type;
2405 tnl.port = ti->port; 2412 tnl.port = ti->port;
2406 2413
2407 if (efx->type->udp_tnl_add_port) 2414 if (efx->type->udp_tnl_del_port)
2408 (void)efx->type->udp_tnl_del_port(efx, tnl); 2415 (void)efx->type->udp_tnl_del_port(efx, tnl);
2409} 2416}
2410 2417
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f5e5cd1659a1..29614da91cbf 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
1354 free_cpumask_var(thread_mask); 1354 free_cpumask_var(thread_mask);
1355 } 1355 }
1356 1356
1357 if (count > EF4_MAX_RX_QUEUES) {
1358 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1359 "Reducing number of rx queues from %u to %u.\n",
1360 count, EF4_MAX_RX_QUEUES);
1361 count = EF4_MAX_RX_QUEUES;
1362 }
1363
1357 return count; 1364 return count;
1358} 1365}
1359 1366
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 296c8efd0038..9e631952b86f 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -74,15 +74,21 @@ config TI_CPSW
74 will be called cpsw. 74 will be called cpsw.
75 75
76config TI_CPTS 76config TI_CPTS
77 tristate "TI Common Platform Time Sync (CPTS) Support" 77 bool "TI Common Platform Time Sync (CPTS) Support"
78 depends on TI_CPSW || TI_KEYSTONE_NETCP 78 depends on TI_CPSW || TI_KEYSTONE_NETCP
79 imply PTP_1588_CLOCK 79 depends on PTP_1588_CLOCK
80 ---help--- 80 ---help---
81 This driver supports the Common Platform Time Sync unit of 81 This driver supports the Common Platform Time Sync unit of
82 the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. 82 the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
83 The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the 83 The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
84 driver offers a PTP Hardware Clock. 84 driver offers a PTP Hardware Clock.
85 85
86config TI_CPTS_MOD
87 tristate
88 depends on TI_CPTS
89 default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
90 default m
91
86config TI_KEYSTONE_NETCP 92config TI_KEYSTONE_NETCP
87 tristate "TI Keystone NETCP Core Support" 93 tristate "TI Keystone NETCP Core Support"
88 select TI_CPSW_ALE 94 select TI_CPSW_ALE
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 1e7c10bf8713..10e6b0ce51ba 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
12obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 12obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
13obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o 13obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
14obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o 14obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
15obj-$(CONFIG_TI_CPTS) += cpts.o 15obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
16obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 16obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
17ti_cpsw-y := cpsw.o 17ti_cpsw-y := cpsw.o
18 18
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9f3d9c67e3fe..fa674a8bda0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1268{ 1268{
1269 u32 slave_port; 1269 u32 slave_port;
1270 struct phy_device *phy;
1270 struct cpsw_common *cpsw = priv->cpsw; 1271 struct cpsw_common *cpsw = priv->cpsw;
1271 1272
1272 soft_reset_slave(slave); 1273 soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1300 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1301 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1301 1302
1302 if (slave->data->phy_node) { 1303 if (slave->data->phy_node) {
1303 slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, 1304 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1304 &cpsw_adjust_link, 0, slave->data->phy_if); 1305 &cpsw_adjust_link, 0, slave->data->phy_if);
1305 if (!slave->phy) { 1306 if (!phy) {
1306 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", 1307 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
1307 slave->data->phy_node->full_name, 1308 slave->data->phy_node->full_name,
1308 slave->slave_num); 1309 slave->slave_num);
1309 return; 1310 return;
1310 } 1311 }
1311 } else { 1312 } else {
1312 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1313 phy = phy_connect(priv->ndev, slave->data->phy_id,
1313 &cpsw_adjust_link, slave->data->phy_if); 1314 &cpsw_adjust_link, slave->data->phy_if);
1314 if (IS_ERR(slave->phy)) { 1315 if (IS_ERR(phy)) {
1315 dev_err(priv->dev, 1316 dev_err(priv->dev,
1316 "phy \"%s\" not found on slave %d, err %ld\n", 1317 "phy \"%s\" not found on slave %d, err %ld\n",
1317 slave->data->phy_id, slave->slave_num, 1318 slave->data->phy_id, slave->slave_num,
1318 PTR_ERR(slave->phy)); 1319 PTR_ERR(phy));
1319 slave->phy = NULL;
1320 return; 1320 return;
1321 } 1321 }
1322 } 1322 }
1323 1323
1324 slave->phy = phy;
1325
1324 phy_attached_info(slave->phy); 1326 phy_attached_info(slave->phy);
1325 1327
1326 phy_start(slave->phy); 1328 phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1817 } 1819 }
1818 1820
1819 cpsw_intr_enable(cpsw); 1821 cpsw_intr_enable(cpsw);
1822 netif_trans_update(ndev);
1823 netif_tx_wake_all_queues(ndev);
1820} 1824}
1821 1825
1822static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 1826static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b75d9cdcfb0c..ae48c809bac9 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46MODULE_VERSION(DRV_VERSION); 46MODULE_VERSION(DRV_VERSION);
47 47
48#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
49
48static int fjes_request_irq(struct fjes_adapter *); 50static int fjes_request_irq(struct fjes_adapter *);
49static void fjes_free_irq(struct fjes_adapter *); 51static void fjes_free_irq(struct fjes_adapter *);
50 52
@@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
78static int fjes_poll(struct napi_struct *, int); 80static int fjes_poll(struct napi_struct *, int);
79 81
80static const struct acpi_device_id fjes_acpi_ids[] = { 82static const struct acpi_device_id fjes_acpi_ids[] = {
81 {"PNP0C02", 0}, 83 {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
82 {"", 0}, 84 {"", 0},
83}; 85};
84MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); 86MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
115 }, 117 },
116}; 118};
117 119
118static int fjes_acpi_add(struct acpi_device *device) 120static bool is_extended_socket_device(struct acpi_device *device)
119{ 121{
120 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; 122 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
121 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; 123 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
122 struct platform_device *plat_dev;
123 union acpi_object *str; 124 union acpi_object *str;
124 acpi_status status; 125 acpi_status status;
125 int result; 126 int result;
126 127
127 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); 128 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
128 if (ACPI_FAILURE(status)) 129 if (ACPI_FAILURE(status))
129 return -ENODEV; 130 return false;
130 131
131 str = buffer.pointer; 132 str = buffer.pointer;
132 result = utf16s_to_utf8s((wchar_t *)str->string.pointer, 133 result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
136 137
137 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { 138 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
138 kfree(buffer.pointer); 139 kfree(buffer.pointer);
139 return -ENODEV; 140 return false;
140 } 141 }
141 kfree(buffer.pointer); 142 kfree(buffer.pointer);
142 143
144 return true;
145}
146
147static int acpi_check_extended_socket_status(struct acpi_device *device)
148{
149 unsigned long long sta;
150 acpi_status status;
151
152 status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
153 if (ACPI_FAILURE(status))
154 return -ENODEV;
155
156 if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
157 (sta & ACPI_STA_DEVICE_ENABLED) &&
158 (sta & ACPI_STA_DEVICE_UI) &&
159 (sta & ACPI_STA_DEVICE_FUNCTIONING)))
160 return -ENODEV;
161
162 return 0;
163}
164
165static int fjes_acpi_add(struct acpi_device *device)
166{
167 struct platform_device *plat_dev;
168 acpi_status status;
169
170 if (!is_extended_socket_device(device))
171 return -ENODEV;
172
173 if (acpi_check_extended_socket_status(device))
174 return -ENODEV;
175
143 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, 176 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
144 fjes_get_acpi_resource, fjes_resource); 177 fjes_get_acpi_resource, fjes_resource);
145 if (ACPI_FAILURE(status)) 178 if (ACPI_FAILURE(status))
@@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
1316 netdev->min_mtu = fjes_support_mtu[0]; 1349 netdev->min_mtu = fjes_support_mtu[0];
1317 netdev->max_mtu = fjes_support_mtu[3]; 1350 netdev->max_mtu = fjes_support_mtu[3];
1318 netdev->flags |= IFF_BROADCAST; 1351 netdev->flags |= IFF_BROADCAST;
1319 netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; 1352 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1320} 1353}
1321 1354
1322static void fjes_irq_watch_task(struct work_struct *work) 1355static void fjes_irq_watch_task(struct work_struct *work)
@@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
1473 } 1506 }
1474} 1507}
1475 1508
1509static acpi_status
1510acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1511 void *context, void **return_value)
1512{
1513 struct acpi_device *device;
1514 bool *found = context;
1515 int result;
1516
1517 result = acpi_bus_get_device(obj_handle, &device);
1518 if (result)
1519 return AE_OK;
1520
1521 if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1522 return AE_OK;
1523
1524 if (!is_extended_socket_device(device))
1525 return AE_OK;
1526
1527 if (acpi_check_extended_socket_status(device))
1528 return AE_OK;
1529
1530 *found = true;
1531 return AE_CTRL_TERMINATE;
1532}
1533
1476/* fjes_init_module - Driver Registration Routine */ 1534/* fjes_init_module - Driver Registration Routine */
1477static int __init fjes_init_module(void) 1535static int __init fjes_init_module(void)
1478{ 1536{
1537 bool found = false;
1479 int result; 1538 int result;
1480 1539
1540 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1541 acpi_find_extended_socket_device, NULL, &found,
1542 NULL);
1543
1544 if (!found)
1545 return -ENODEV;
1546
1481 pr_info("%s - version %s - %s\n", 1547 pr_info("%s - version %s - %s\n",
1482 fjes_driver_string, fjes_driver_version, fjes_copyright); 1548 fjes_driver_string, fjes_driver_version, fjes_copyright);
1483 1549
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4c1d8cca247b..8dd0b8770328 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context)
1231 return; 1231 return;
1232 1232
1233 net_device = net_device_to_netvsc_device(ndev); 1233 net_device = net_device_to_netvsc_device(ndev);
1234 if (unlikely(net_device->destroy) && 1234 if (unlikely(!net_device))
1235 netvsc_channel_idle(net_device, q_idx)) 1235 return;
1236
1237 if (unlikely(net_device->destroy &&
1238 netvsc_channel_idle(net_device, q_idx)))
1236 return; 1239 return;
1237 1240
1238 /* commit_rd_index() -> hv_signal_on_read() needs this. */ 1241 /* commit_rd_index() -> hv_signal_on_read() needs this. */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ffedad2a360a..15b920086251 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
418 memset(rd, 0, sizeof(*rd)); 418 memset(rd, 0, sizeof(*rd));
419 rd->hw = hwmap + i; 419 rd->hw = hwmap + i;
420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
421 if (rd->buf == NULL || 421 if (rd->buf)
422 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 422 busaddr = pci_map_single(pdev, rd->buf, len, dir);
423 if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
423 if (rd->buf) { 424 if (rd->buf) {
424 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 425 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
425 __func__, rd->buf); 426 __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
430 rd = r->rd + j; 431 rd = r->rd + j;
431 busaddr = rd_get_addr(rd); 432 busaddr = rd_get_addr(rd);
432 rd_set_addr_status(rd, 0, 0); 433 rd_set_addr_status(rd, 0, 0);
433 if (busaddr) 434 pci_unmap_single(pdev, busaddr, len, dir);
434 pci_unmap_single(pdev, busaddr, len, dir);
435 kfree(rd->buf); 435 kfree(rd->buf);
436 rd->buf = NULL; 436 rd->buf = NULL;
437 } 437 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e2460a57e4b1..ed0d10f54f26 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; 1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
1439 skb_queue_tail(&dp83640->rx_queue, skb); 1439 skb_queue_tail(&dp83640->rx_queue, skb);
1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); 1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
1441 } else {
1442 netif_rx_ni(skb);
1443 } 1441 }
1444 1442
1445 return true; 1443 return true;
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 6b988f77da08..61941e29daae 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
84 84
85 return 0; 85 return 0;
86} 86}
87EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6742070ca676..1326d99771c1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -798,9 +798,6 @@ static struct phy_driver ksphy_driver[] = {
798 .read_status = genphy_read_status, 798 .read_status = genphy_read_status,
799 .ack_interrupt = kszphy_ack_interrupt, 799 .ack_interrupt = kszphy_ack_interrupt,
800 .config_intr = kszphy_config_intr, 800 .config_intr = kszphy_config_intr,
801 .get_sset_count = kszphy_get_sset_count,
802 .get_strings = kszphy_get_strings,
803 .get_stats = kszphy_get_stats,
804 .suspend = genphy_suspend, 801 .suspend = genphy_suspend,
805 .resume = genphy_resume, 802 .resume = genphy_resume,
806}, { 803}, {
@@ -940,9 +937,6 @@ static struct phy_driver ksphy_driver[] = {
940 .read_status = genphy_read_status, 937 .read_status = genphy_read_status,
941 .ack_interrupt = kszphy_ack_interrupt, 938 .ack_interrupt = kszphy_ack_interrupt,
942 .config_intr = kszphy_config_intr, 939 .config_intr = kszphy_config_intr,
943 .get_sset_count = kszphy_get_sset_count,
944 .get_strings = kszphy_get_strings,
945 .get_stats = kszphy_get_stats,
946 .suspend = genphy_suspend, 940 .suspend = genphy_suspend,
947 .resume = genphy_resume, 941 .resume = genphy_resume,
948}, { 942}, {
@@ -952,6 +946,7 @@ static struct phy_driver ksphy_driver[] = {
952 .features = PHY_GBIT_FEATURES, 946 .features = PHY_GBIT_FEATURES,
953 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 947 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
954 .driver_data = &ksz9021_type, 948 .driver_data = &ksz9021_type,
949 .probe = kszphy_probe,
955 .config_init = ksz9021_config_init, 950 .config_init = ksz9021_config_init,
956 .config_aneg = genphy_config_aneg, 951 .config_aneg = genphy_config_aneg,
957 .read_status = genphy_read_status, 952 .read_status = genphy_read_status,
@@ -971,6 +966,7 @@ static struct phy_driver ksphy_driver[] = {
971 .features = PHY_GBIT_FEATURES, 966 .features = PHY_GBIT_FEATURES,
972 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 967 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
973 .driver_data = &ksz9021_type, 968 .driver_data = &ksz9021_type,
969 .probe = kszphy_probe,
974 .config_init = ksz9031_config_init, 970 .config_init = ksz9031_config_init,
975 .config_aneg = genphy_config_aneg, 971 .config_aneg = genphy_config_aneg,
976 .read_status = ksz9031_read_status, 972 .read_status = ksz9031_read_status,
@@ -989,9 +985,6 @@ static struct phy_driver ksphy_driver[] = {
989 .config_init = kszphy_config_init, 985 .config_init = kszphy_config_init,
990 .config_aneg = ksz8873mll_config_aneg, 986 .config_aneg = ksz8873mll_config_aneg,
991 .read_status = ksz8873mll_read_status, 987 .read_status = ksz8873mll_read_status,
992 .get_sset_count = kszphy_get_sset_count,
993 .get_strings = kszphy_get_strings,
994 .get_stats = kszphy_get_stats,
995 .suspend = genphy_suspend, 988 .suspend = genphy_suspend,
996 .resume = genphy_resume, 989 .resume = genphy_resume,
997}, { 990}, {
@@ -1003,9 +996,6 @@ static struct phy_driver ksphy_driver[] = {
1003 .config_init = kszphy_config_init, 996 .config_init = kszphy_config_init,
1004 .config_aneg = genphy_config_aneg, 997 .config_aneg = genphy_config_aneg,
1005 .read_status = genphy_read_status, 998 .read_status = genphy_read_status,
1006 .get_sset_count = kszphy_get_sset_count,
1007 .get_strings = kszphy_get_strings,
1008 .get_stats = kszphy_get_stats,
1009 .suspend = genphy_suspend, 999 .suspend = genphy_suspend,
1010 .resume = genphy_resume, 1000 .resume = genphy_resume,
1011}, { 1001}, {
@@ -1017,9 +1007,6 @@ static struct phy_driver ksphy_driver[] = {
1017 .config_init = kszphy_config_init, 1007 .config_init = kszphy_config_init,
1018 .config_aneg = ksz8873mll_config_aneg, 1008 .config_aneg = ksz8873mll_config_aneg,
1019 .read_status = ksz8873mll_read_status, 1009 .read_status = ksz8873mll_read_status,
1020 .get_sset_count = kszphy_get_sset_count,
1021 .get_strings = kszphy_get_strings,
1022 .get_stats = kszphy_get_stats,
1023 .suspend = genphy_suspend, 1010 .suspend = genphy_suspend,
1024 .resume = genphy_resume, 1011 .resume = genphy_resume,
1025} }; 1012} };
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1be69d8bc909..a2bfc82e95d7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev)
681 cancel_delayed_work_sync(&phydev->state_queue); 681 cancel_delayed_work_sync(&phydev->state_queue);
682 682
683 mutex_lock(&phydev->lock); 683 mutex_lock(&phydev->lock);
684 if (phydev->state > PHY_UP) 684 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
685 phydev->state = PHY_UP; 685 phydev->state = PHY_UP;
686 mutex_unlock(&phydev->lock); 686 mutex_unlock(&phydev->lock);
687} 687}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 1b52520715ae..f8c81f12d988 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ 990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO) 991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 992
993static void ___team_compute_features(struct team *team) 993static void __team_compute_features(struct team *team)
994{ 994{
995 struct team_port *port; 995 struct team_port *port;
996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024} 1024}
1025 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1029 netdev_change_features(team->dev);
1030}
1031
1032static void team_compute_features(struct team *team) 1026static void team_compute_features(struct team *team)
1033{ 1027{
1034 mutex_lock(&team->lock); 1028 mutex_lock(&team->lock);
1035 ___team_compute_features(team); 1029 __team_compute_features(team);
1036 mutex_unlock(&team->lock); 1030 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev); 1031 netdev_change_features(team->dev);
1038} 1032}
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
1641 team_notify_peers_fini(team); 1635 team_notify_peers_fini(team);
1642 team_queue_override_fini(team); 1636 team_queue_override_fini(team);
1643 mutex_unlock(&team->lock); 1637 mutex_unlock(&team->lock);
1638 netdev_change_features(dev);
1644} 1639}
1645 1640
1646static void team_destructor(struct net_device *dev) 1641static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1928 mutex_lock(&team->lock); 1923 mutex_lock(&team->lock);
1929 err = team_port_add(team, port_dev); 1924 err = team_port_add(team, port_dev);
1930 mutex_unlock(&team->lock); 1925 mutex_unlock(&team->lock);
1926
1927 if (!err)
1928 netdev_change_features(dev);
1929
1931 return err; 1930 return err;
1932} 1931}
1933 1932
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1939 mutex_lock(&team->lock); 1938 mutex_lock(&team->lock);
1940 err = team_port_del(team, port_dev); 1939 err = team_port_del(team, port_dev);
1941 mutex_unlock(&team->lock); 1940 mutex_unlock(&team->lock);
1941
1942 if (!err)
1943 netdev_change_features(dev);
1944
1942 return err; 1945 return err;
1943} 1946}
1944 1947
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34cc3c590aa5..cc88cd7856f5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1931 return -EINVAL; 1931 return -EINVAL;
1932 1932
1933 tun->set_features = features; 1933 tun->set_features = features;
1934 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
1935 tun->dev->wanted_features |= features;
1934 netdev_update_features(tun->dev); 1936 netdev_update_features(tun->dev);
1935 1937
1936 return 0; 1938 return 0;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f5552aaaa77a..f3ae88fdf332 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
532#define LENOVO_VENDOR_ID 0x17ef 532#define LENOVO_VENDOR_ID 0x17ef
533#define NVIDIA_VENDOR_ID 0x0955 533#define NVIDIA_VENDOR_ID 0x0955
534#define HP_VENDOR_ID 0x03f0 534#define HP_VENDOR_ID 0x03f0
535#define MICROSOFT_VENDOR_ID 0x045e
535 536
536static const struct usb_device_id products[] = { 537static const struct usb_device_id products[] = {
537/* BLACKLIST !! 538/* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id products[] = {
761 .driver_info = 0, 762 .driver_info = 0,
762}, 763},
763 764
765/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
766{
767 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
768 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
769 .driver_info = 0,
770},
771
772/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
773{
774 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
775 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
776 .driver_info = 0,
777},
778
764/* WHITELIST!!! 779/* WHITELIST!!!
765 * 780 *
766 * CDC Ether uses two interfaces, not necessarily consecutive. 781 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 8a40202c0a17..c4f1c363e24b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
254 tx_overhead = 0x40; 254 tx_overhead = 0x40;
255 255
256 len = skb->len; 256 len = skb->len;
257 if (skb_headroom(skb) < tx_overhead) { 257 if (skb_cow_head(skb, tx_overhead)) {
258 struct sk_buff *skb2;
259
260 skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
261 dev_kfree_skb_any(skb); 258 dev_kfree_skb_any(skb);
262 skb = skb2; 259 return NULL;
263 if (!skb)
264 return NULL;
265 } 260 }
266 261
267 __skb_push(skb, tx_overhead); 262 __skb_push(skb, tx_overhead);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index e221bfcee76b..947bea81d924 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
293{ 293{
294 int len = skb->len; 294 int len = skb->len;
295 295
296 if (skb_headroom(skb) < 2) { 296 if (skb_cow_head(skb, 2)) {
297 struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
298 dev_kfree_skb_any(skb); 297 dev_kfree_skb_any(skb);
299 skb = skb2; 298 return NULL;
300 if (!skb)
301 return NULL;
302 } 299 }
303 skb_push(skb, 2); 300 skb_push(skb, 2);
304 301
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 876f02f4945e..2a2c3edb6bad 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
803 } 803 }
804 804
805 /* We now decide whether we can put our special header into the sk_buff */ 805 /* We now decide whether we can put our special header into the sk_buff */
806 if (skb_cloned(skb) || skb_headroom(skb) < 2) { 806 if (skb_cow_head(skb, 2)) {
807 /* no such luck - we make our own */ 807 kaweth->stats.tx_errors++;
808 struct sk_buff *copied_skb; 808 netif_start_queue(net);
809 copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); 809 spin_unlock_irq(&kaweth->device_lock);
810 dev_kfree_skb_irq(skb); 810 dev_kfree_skb_any(skb);
811 skb = copied_skb; 811 return NETDEV_TX_OK;
812 if (!copied_skb) {
813 kaweth->stats.tx_errors++;
814 netif_start_queue(net);
815 spin_unlock_irq(&kaweth->device_lock);
816 return NETDEV_TX_OK;
817 }
818 } 812 }
819 813
820 private_header = (__le16 *)__skb_push(skb, 2); 814 private_header = (__le16 *)__skb_push(skb, 2);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 9889a70ff4f6..636f48f19d1e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2607{ 2607{
2608 u32 tx_cmd_a, tx_cmd_b; 2608 u32 tx_cmd_a, tx_cmd_b;
2609 2609
2610 if (skb_headroom(skb) < TX_OVERHEAD) { 2610 if (skb_cow_head(skb, TX_OVERHEAD)) {
2611 struct sk_buff *skb2;
2612
2613 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2614 dev_kfree_skb_any(skb); 2611 dev_kfree_skb_any(skb);
2615 skb = skb2; 2612 return NULL;
2616 if (!skb)
2617 return NULL;
2618 } 2613 }
2619 2614
2620 if (lan78xx_linearize(skb) < 0) 2615 if (lan78xx_linearize(skb) < 0)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 805674550683..2474618404f5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -580,6 +580,10 @@ static const struct usb_device_id products[] = {
580 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), 580 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
581 .driver_info = (unsigned long)&qmi_wwan_info, 581 .driver_info = (unsigned long)&qmi_wwan_info,
582 }, 582 },
583 { /* Motorola Mapphone devices with MDM6600 */
584 USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
585 .driver_info = (unsigned long)&qmi_wwan_info,
586 },
583 587
584 /* 2. Combined interface devices matching on class+protocol */ 588 /* 2. Combined interface devices matching on class+protocol */
585 { /* Huawei E367 and possibly others in "Windows mode" */ 589 { /* Huawei E367 and possibly others in "Windows mode" */
@@ -904,7 +908,7 @@ static const struct usb_device_id products[] = {
904 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 908 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
905 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 909 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
906 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 910 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
907 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 911 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
908 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 912 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
909 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 913 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
910 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 914 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
@@ -925,6 +929,8 @@ static const struct usb_device_id products[] = {
925 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 929 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
926 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 930 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
927 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 931 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
932 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
933 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
928 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 934 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
929 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 935 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
930 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ 936 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 986243c932cc..07f788c49d57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
32#define NETNEXT_VERSION "08" 32#define NETNEXT_VERSION "08"
33 33
34/* Information for net */ 34/* Information for net */
35#define NET_VERSION "8" 35#define NET_VERSION "9"
36 36
37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
501#define RTL8153_RMS RTL8153_MAX_PACKET 501#define RTL8153_RMS RTL8153_MAX_PACKET
502#define RTL8152_TX_TIMEOUT (5 * HZ) 502#define RTL8152_TX_TIMEOUT (5 * HZ)
503#define RTL8152_NAPI_WEIGHT 64 503#define RTL8152_NAPI_WEIGHT 64
504#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
505 sizeof(struct rx_desc) + RX_ALIGN)
504 506
505/* rtl8152 flags */ 507/* rtl8152 flags */
506enum rtl8152_flags { 508enum rtl8152_flags {
@@ -515,6 +517,7 @@ enum rtl8152_flags {
515 517
516/* Define these values to match your device */ 518/* Define these values to match your device */
517#define VENDOR_ID_REALTEK 0x0bda 519#define VENDOR_ID_REALTEK 0x0bda
520#define VENDOR_ID_MICROSOFT 0x045e
518#define VENDOR_ID_SAMSUNG 0x04e8 521#define VENDOR_ID_SAMSUNG 0x04e8
519#define VENDOR_ID_LENOVO 0x17ef 522#define VENDOR_ID_LENOVO 0x17ef
520#define VENDOR_ID_NVIDIA 0x0955 523#define VENDOR_ID_NVIDIA 0x0955
@@ -1292,6 +1295,7 @@ static void intr_callback(struct urb *urb)
1292 } 1295 }
1293 } else { 1296 } else {
1294 if (netif_carrier_ok(tp->netdev)) { 1297 if (netif_carrier_ok(tp->netdev)) {
1298 netif_stop_queue(tp->netdev);
1295 set_bit(RTL8152_LINK_CHG, &tp->flags); 1299 set_bit(RTL8152_LINK_CHG, &tp->flags);
1296 schedule_delayed_work(&tp->schedule, 0); 1300 schedule_delayed_work(&tp->schedule, 0);
1297 } 1301 }
@@ -1362,6 +1366,7 @@ static int alloc_all_mem(struct r8152 *tp)
1362 spin_lock_init(&tp->rx_lock); 1366 spin_lock_init(&tp->rx_lock);
1363 spin_lock_init(&tp->tx_lock); 1367 spin_lock_init(&tp->tx_lock);
1364 INIT_LIST_HEAD(&tp->tx_free); 1368 INIT_LIST_HEAD(&tp->tx_free);
1369 INIT_LIST_HEAD(&tp->rx_done);
1365 skb_queue_head_init(&tp->tx_queue); 1370 skb_queue_head_init(&tp->tx_queue);
1366 skb_queue_head_init(&tp->rx_queue); 1371 skb_queue_head_init(&tp->rx_queue);
1367 1372
@@ -2252,8 +2257,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
2252 2257
2253static void r8153_set_rx_early_size(struct r8152 *tp) 2258static void r8153_set_rx_early_size(struct r8152 *tp)
2254{ 2259{
2255 u32 mtu = tp->netdev->mtu; 2260 u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
2256 u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
2257 2261
2258 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); 2262 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
2259} 2263}
@@ -2898,7 +2902,8 @@ static void r8153_first_init(struct r8152 *tp)
2898 2902
2899 rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); 2903 rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
2900 2904
2901 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); 2905 ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
2906 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
2902 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); 2907 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
2903 2908
2904 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); 2909 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2955,8 @@ static void r8153_enter_oob(struct r8152 *tp)
2950 usleep_range(1000, 2000); 2955 usleep_range(1000, 2000);
2951 } 2956 }
2952 2957
2953 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); 2958 ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
2959 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
2954 2960
2955 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); 2961 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
2956 ocp_data &= ~TEREDO_WAKE_MASK; 2962 ocp_data &= ~TEREDO_WAKE_MASK;
@@ -3165,6 +3171,9 @@ static void set_carrier(struct r8152 *tp)
3165 napi_enable(&tp->napi); 3171 napi_enable(&tp->napi);
3166 netif_wake_queue(netdev); 3172 netif_wake_queue(netdev);
3167 netif_info(tp, link, netdev, "carrier on\n"); 3173 netif_info(tp, link, netdev, "carrier on\n");
3174 } else if (netif_queue_stopped(netdev) &&
3175 skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
3176 netif_wake_queue(netdev);
3168 } 3177 }
3169 } else { 3178 } else {
3170 if (netif_carrier_ok(netdev)) { 3179 if (netif_carrier_ok(netdev)) {
@@ -3698,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf)
3698 tp->rtl_ops.autosuspend_en(tp, false); 3707 tp->rtl_ops.autosuspend_en(tp, false);
3699 napi_disable(&tp->napi); 3708 napi_disable(&tp->napi);
3700 set_bit(WORK_ENABLE, &tp->flags); 3709 set_bit(WORK_ENABLE, &tp->flags);
3701 if (netif_carrier_ok(tp->netdev)) 3710
3702 rtl_start_rx(tp); 3711 if (netif_carrier_ok(tp->netdev)) {
3712 if (rtl8152_get_speed(tp) & LINK_STATUS) {
3713 rtl_start_rx(tp);
3714 } else {
3715 netif_carrier_off(tp->netdev);
3716 tp->rtl_ops.disable(tp);
3717 netif_info(tp, link, tp->netdev,
3718 "linking down\n");
3719 }
3720 }
3721
3703 napi_enable(&tp->napi); 3722 napi_enable(&tp->napi);
3704 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3723 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3705 smp_mb__after_atomic(); 3724 smp_mb__after_atomic();
@@ -4200,8 +4219,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
4200 4219
4201 dev->mtu = new_mtu; 4220 dev->mtu = new_mtu;
4202 4221
4203 if (netif_running(dev) && netif_carrier_ok(dev)) 4222 if (netif_running(dev)) {
4204 r8153_set_rx_early_size(tp); 4223 u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
4224
4225 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
4226
4227 if (netif_carrier_ok(dev))
4228 r8153_set_rx_early_size(tp);
4229 }
4205 4230
4206 mutex_unlock(&tp->control); 4231 mutex_unlock(&tp->control);
4207 4232
@@ -4497,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
4497static struct usb_device_id rtl8152_table[] = { 4522static struct usb_device_id rtl8152_table[] = {
4498 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, 4523 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
4499 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 4524 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4525 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
4526 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
4500 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4527 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4501 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4528 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4502 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, 4529 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 0b17b40d7a4f..190de9a90f73 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
2203{ 2203{
2204 u32 tx_cmd_a, tx_cmd_b; 2204 u32 tx_cmd_a, tx_cmd_b;
2205 2205
2206 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2206 if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
2207 struct sk_buff *skb2 =
2208 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
2209 dev_kfree_skb_any(skb); 2207 dev_kfree_skb_any(skb);
2210 skb = skb2; 2208 return NULL;
2211 if (!skb)
2212 return NULL;
2213 } 2209 }
2214 2210
2215 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; 2211 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 831aa33d078a..5f19fb0f025d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
2001 /* We do not advertise SG, so skbs should be already linearized */ 2001 /* We do not advertise SG, so skbs should be already linearized */
2002 BUG_ON(skb_shinfo(skb)->nr_frags); 2002 BUG_ON(skb_shinfo(skb)->nr_frags);
2003 2003
2004 if (skb_headroom(skb) < overhead) { 2004 /* Make writable and expand header space by overhead if required */
2005 struct sk_buff *skb2 = skb_copy_expand(skb, 2005 if (skb_cow_head(skb, overhead)) {
2006 overhead, 0, flags); 2006 /* Must deallocate here as returning NULL to indicate error
2007 * means the skb won't be deallocated in the caller.
2008 */
2007 dev_kfree_skb_any(skb); 2009 dev_kfree_skb_any(skb);
2008 skb = skb2; 2010 return NULL;
2009 if (!skb)
2010 return NULL;
2011 } 2011 }
2012 2012
2013 if (csum) { 2013 if (csum) {
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 4a1e9c489f1f..aadfe1d1c37e 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
456 456
457 len = skb->len; 457 len = skb->len;
458 458
459 if (skb_headroom(skb) < SR_TX_OVERHEAD) { 459 if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
460 struct sk_buff *skb2;
461
462 skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
463 dev_kfree_skb_any(skb); 460 dev_kfree_skb_any(skb);
464 skb = skb2; 461 return NULL;
465 if (!skb)
466 return NULL;
467 } 462 }
468 463
469 __skb_push(skb, SR_TX_OVERHEAD); 464 __skb_push(skb, SR_TX_OVERHEAD);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea6531a..453244805c52 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1929 " value=0x%04x index=0x%04x size=%d\n", 1929 " value=0x%04x index=0x%04x size=%d\n",
1930 cmd, reqtype, value, index, size); 1930 cmd, reqtype, value, index, size);
1931 1931
1932 if (data) { 1932 if (size) {
1933 buf = kmalloc(size, GFP_KERNEL); 1933 buf = kmalloc(size, GFP_KERNEL);
1934 if (!buf) 1934 if (!buf)
1935 goto out; 1935 goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1939 cmd, reqtype, value, index, buf, size, 1939 cmd, reqtype, value, index, buf, size,
1940 USB_CTRL_GET_TIMEOUT); 1940 USB_CTRL_GET_TIMEOUT);
1941 if (err > 0 && err <= size) 1941 if (err > 0 && err <= size) {
1942 memcpy(data, buf, err); 1942 if (data)
1943 memcpy(data, buf, err);
1944 else
1945 netdev_dbg(dev->net,
1946 "Huh? Data requested but thrown away.\n");
1947 }
1943 kfree(buf); 1948 kfree(buf);
1944out: 1949out:
1945 return err; 1950 return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1960 buf = kmemdup(data, size, GFP_KERNEL); 1965 buf = kmemdup(data, size, GFP_KERNEL);
1961 if (!buf) 1966 if (!buf)
1962 goto out; 1967 goto out;
1963 } 1968 } else {
1969 if (size) {
1970 WARN_ON_ONCE(1);
1971 err = -EINVAL;
1972 goto out;
1973 }
1974 }
1964 1975
1965 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1976 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1966 cmd, reqtype, value, index, buf, size, 1977 cmd, reqtype, value, index, buf, size,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ea9890d61967..f36584616e7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
2230#define MIN_MTU ETH_MIN_MTU 2230#define MIN_MTU ETH_MIN_MTU
2231#define MAX_MTU ETH_MAX_MTU 2231#define MAX_MTU ETH_MAX_MTU
2232 2232
2233static int virtnet_probe(struct virtio_device *vdev) 2233static int virtnet_validate(struct virtio_device *vdev)
2234{ 2234{
2235 int i, err;
2236 struct net_device *dev;
2237 struct virtnet_info *vi;
2238 u16 max_queue_pairs;
2239 int mtu;
2240
2241 if (!vdev->config->get) { 2235 if (!vdev->config->get) {
2242 dev_err(&vdev->dev, "%s failure: config access disabled\n", 2236 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2243 __func__); 2237 __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
2247 if (!virtnet_validate_features(vdev)) 2241 if (!virtnet_validate_features(vdev))
2248 return -EINVAL; 2242 return -EINVAL;
2249 2243
2244 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2245 int mtu = virtio_cread16(vdev,
2246 offsetof(struct virtio_net_config,
2247 mtu));
2248 if (mtu < MIN_MTU)
2249 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2250 }
2251
2252 return 0;
2253}
2254
2255static int virtnet_probe(struct virtio_device *vdev)
2256{
2257 int i, err;
2258 struct net_device *dev;
2259 struct virtnet_info *vi;
2260 u16 max_queue_pairs;
2261 int mtu;
2262
2250 /* Find if host supports multiqueue virtio_net device */ 2263 /* Find if host supports multiqueue virtio_net device */
2251 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2264 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2252 struct virtio_net_config, 2265 struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
2362 offsetof(struct virtio_net_config, 2375 offsetof(struct virtio_net_config,
2363 mtu)); 2376 mtu));
2364 if (mtu < dev->min_mtu) { 2377 if (mtu < dev->min_mtu) {
2365 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2378 /* Should never trigger: MTU was previously validated
2366 } else { 2379 * in virtnet_validate.
2367 dev->mtu = mtu; 2380 */
2368 dev->max_mtu = mtu; 2381 dev_err(&vdev->dev, "device MTU appears to have changed "
2382 "it is now %d < %d", mtu, dev->min_mtu);
2383 goto free_stats;
2369 } 2384 }
2385
2386 dev->mtu = mtu;
2387 dev->max_mtu = mtu;
2388
2389 /* TODO: size buffers correctly in this case. */
2390 if (dev->mtu > ETH_DATA_LEN)
2391 vi->big_packets = true;
2370 } 2392 }
2371 2393
2372 if (vi->any_header_sg) 2394 if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
2544 .driver.name = KBUILD_MODNAME, 2566 .driver.name = KBUILD_MODNAME,
2545 .driver.owner = THIS_MODULE, 2567 .driver.owner = THIS_MODULE,
2546 .id_table = id_table, 2568 .id_table = id_table,
2569 .validate = virtnet_validate,
2547 .probe = virtnet_probe, 2570 .probe = virtnet_probe,
2548 .remove = virtnet_remove, 2571 .remove = virtnet_remove,
2549 .config_changed = virtnet_config_changed, 2572 .config_changed = virtnet_config_changed,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index fea687f35b5a..7d909c8183e9 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
462 } 462 }
463 463
464 if (rt6_local) { 464 if (rt6_local) {
465 if (rt6_local->rt6i_idev) 465 if (rt6_local->rt6i_idev) {
466 in6_dev_put(rt6_local->rt6i_idev); 466 in6_dev_put(rt6_local->rt6i_idev);
467 rt6_local->rt6i_idev = NULL;
468 }
467 469
468 dst = &rt6_local->dst; 470 dst = &rt6_local->dst;
469 dev_put(dst->dev); 471 dev_put(dst->dev);
@@ -1126,7 +1128,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
1126 goto nla_put_failure; 1128 goto nla_put_failure;
1127 1129
1128 /* rule only needs to appear once */ 1130 /* rule only needs to appear once */
1129 nlh->nlmsg_flags &= NLM_F_EXCL; 1131 nlh->nlmsg_flags |= NLM_F_EXCL;
1130 1132
1131 frh = nlmsg_data(nlh); 1133 frh = nlmsg_data(nlh);
1132 memset(frh, 0, sizeof(*frh)); 1134 memset(frh, 0, sizeof(*frh));
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 33fb26833cd0..d9f37ee4bfdd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
51 .rtc_soc_base_address = 0x00000800, 51 .rtc_soc_base_address = 0x00000800,
52 .rtc_wmac_base_address = 0x00001000, 52 .rtc_wmac_base_address = 0x00001000,
53 .soc_core_base_address = 0x0003a000, 53 .soc_core_base_address = 0x0003a000,
54 .wlan_mac_base_address = 0x00020000, 54 .wlan_mac_base_address = 0x00010000,
55 .ce_wrapper_base_address = 0x00034000, 55 .ce_wrapper_base_address = 0x00034000,
56 .ce0_base_address = 0x00034400, 56 .ce0_base_address = 0x00034400,
57 .ce1_base_address = 0x00034800, 57 .ce1_base_address = 0x00034800,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index de19c7c92bc6..85d949e03f79 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2238 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); 2238 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
2239 struct brcmf_p2p_info *p2p = &cfg->p2p; 2239 struct brcmf_p2p_info *p2p = &cfg->p2p;
2240 struct brcmf_cfg80211_vif *vif; 2240 struct brcmf_cfg80211_vif *vif;
2241 enum nl80211_iftype iftype;
2241 bool wait_for_disable = false; 2242 bool wait_for_disable = false;
2242 int err; 2243 int err;
2243 2244
2244 brcmf_dbg(TRACE, "delete P2P vif\n"); 2245 brcmf_dbg(TRACE, "delete P2P vif\n");
2245 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); 2246 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
2246 2247
2248 iftype = vif->wdev.iftype;
2247 brcmf_cfg80211_arm_vif_event(cfg, vif); 2249 brcmf_cfg80211_arm_vif_event(cfg, vif);
2248 switch (vif->wdev.iftype) { 2250 switch (iftype) {
2249 case NL80211_IFTYPE_P2P_CLIENT: 2251 case NL80211_IFTYPE_P2P_CLIENT:
2250 if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) 2252 if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
2251 wait_for_disable = true; 2253 wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2275 BRCMF_P2P_DISABLE_TIMEOUT); 2277 BRCMF_P2P_DISABLE_TIMEOUT);
2276 2278
2277 err = 0; 2279 err = 0;
2278 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) { 2280 if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
2279 brcmf_vif_clear_mgmt_ies(vif); 2281 brcmf_vif_clear_mgmt_ies(vif);
2280 err = brcmf_p2p_release_p2p_if(vif); 2282 err = brcmf_p2p_release_p2p_if(vif);
2281 } 2283 }
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2291 brcmf_remove_interface(vif->ifp, true); 2293 brcmf_remove_interface(vif->ifp, true);
2292 2294
2293 brcmf_cfg80211_arm_vif_event(cfg, NULL); 2295 brcmf_cfg80211_arm_vif_event(cfg, NULL);
2294 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) 2296 if (iftype != NL80211_IFTYPE_P2P_DEVICE)
2295 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; 2297 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
2296 2298
2297 return err; 2299 return err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a260cd503200..077bfd8f4c0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
1056 1056
1057 if (ret) 1057 if (ret)
1058 return ret; 1058 return ret;
1059 if (count == 0)
1060 return 0;
1059 1061
1060 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, 1062 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
1061 (count - 1), NULL); 1063 (count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 99132ea16ede..c5734e1a02d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
216 qmask |= BIT(vif->hw_queue[ac]); 216 qmask |= BIT(vif->hw_queue[ac]);
217 } 217 }
218 218
219 if (vif->type == NL80211_IFTYPE_AP) 219 if (vif->type == NL80211_IFTYPE_AP ||
220 vif->type == NL80211_IFTYPE_ADHOC)
220 qmask |= BIT(vif->cab_queue); 221 qmask |= BIT(vif->cab_queue);
221 222
222 return qmask; 223 return qmask;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d37b1695c64e..486dcceed17a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2319{ 2319{
2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2321 2321
2322 /* Called when we need to transmit (a) frame(s) from agg queue */ 2322 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2323 2323
2324 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2324 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2325 tids, more_data, true); 2325 tids, more_data, true);
@@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2339 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2339 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2340 2340
2341 if (tid_data->state != IWL_AGG_ON && 2341 if (!iwl_mvm_is_dqa_supported(mvm) &&
2342 tid_data->state != IWL_AGG_ON &&
2342 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) 2343 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2343 continue; 2344 continue;
2344 2345
@@ -2400,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2400 return; 2401 return;
2401 2402
2402 rcu_read_lock(); 2403 rcu_read_lock();
2403 sta = mvm->fw_id_to_mac_id[notif->sta_id]; 2404 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2404 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2405 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2405 rcu_read_unlock(); 2406 rcu_read_unlock();
2406 return; 2407 return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index bd1dcc863d8f..9d28db7f56aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1807 int queue; 1807 int queue;
1808 1808
1809 if (vif->type == NL80211_IFTYPE_AP) 1809 if (vif->type == NL80211_IFTYPE_AP ||
1810 vif->type == NL80211_IFTYPE_ADHOC)
1810 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 1811 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1811 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1812 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1812 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 1813 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
@@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1837 * enabled-cab_queue to the mask) 1838 * enabled-cab_queue to the mask)
1838 */ 1839 */
1839 if (iwl_mvm_is_dqa_supported(mvm) && 1840 if (iwl_mvm_is_dqa_supported(mvm) &&
1840 vif->type == NL80211_IFTYPE_AP) { 1841 (vif->type == NL80211_IFTYPE_AP ||
1842 vif->type == NL80211_IFTYPE_ADHOC)) {
1841 struct iwl_trans_txq_scd_cfg cfg = { 1843 struct iwl_trans_txq_scd_cfg cfg = {
1842 .fifo = IWL_MVM_TX_FIFO_MCAST, 1844 .fifo = IWL_MVM_TX_FIFO_MCAST,
1843 .sta_id = mvmvif->bcast_sta.sta_id, 1845 .sta_id = mvmvif->bcast_sta.sta_id,
@@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1862 1864
1863 lockdep_assert_held(&mvm->mutex); 1865 lockdep_assert_held(&mvm->mutex);
1864 1866
1865 if (vif->type == NL80211_IFTYPE_AP) 1867 if (vif->type == NL80211_IFTYPE_AP ||
1868 vif->type == NL80211_IFTYPE_ADHOC)
1866 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, 1869 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
1867 IWL_MAX_TID_COUNT, 0); 1870 IWL_MAX_TID_COUNT, 0);
1868 1871
@@ -3135,7 +3138,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3135 struct ieee80211_sta *sta, 3138 struct ieee80211_sta *sta,
3136 enum ieee80211_frame_release_type reason, 3139 enum ieee80211_frame_release_type reason,
3137 u16 cnt, u16 tids, bool more_data, 3140 u16 cnt, u16 tids, bool more_data,
3138 bool agg) 3141 bool single_sta_queue)
3139{ 3142{
3140 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3143 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3141 struct iwl_mvm_add_sta_cmd cmd = { 3144 struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3158,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3155 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) 3158 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3156 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); 3159 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3157 3160
3158 /* If we're releasing frames from aggregation queues then check if the 3161 /* If we're releasing frames from aggregation or dqa queues then check
3159 * all queues combined that we're releasing frames from have 3162 * if all the queues that we're releasing frames from, combined, have:
3160 * - more frames than the service period, in which case more_data 3163 * - more frames than the service period, in which case more_data
3161 * needs to be set 3164 * needs to be set
3162 * - fewer than 'cnt' frames, in which case we need to adjust the 3165 * - fewer than 'cnt' frames, in which case we need to adjust the
3163 * firmware command (but do that unconditionally) 3166 * firmware command (but do that unconditionally)
3164 */ 3167 */
3165 if (agg) { 3168 if (single_sta_queue) {
3166 int remaining = cnt; 3169 int remaining = cnt;
3167 int sleep_tx_count; 3170 int sleep_tx_count;
3168 3171
@@ -3172,7 +3175,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3172 u16 n_queued; 3175 u16 n_queued;
3173 3176
3174 tid_data = &mvmsta->tid_data[tid]; 3177 tid_data = &mvmsta->tid_data[tid];
3175 if (WARN(tid_data->state != IWL_AGG_ON && 3178 if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
3179 tid_data->state != IWL_AGG_ON &&
3176 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, 3180 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
3177 "TID %d state is %d\n", 3181 "TID %d state is %d\n",
3178 tid, tid_data->state)) { 3182 tid, tid_data->state)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4be34f902278..1927ce607798 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
547 struct ieee80211_sta *sta, 547 struct ieee80211_sta *sta,
548 enum ieee80211_frame_release_type reason, 548 enum ieee80211_frame_release_type reason,
549 u16 cnt, u16 tids, bool more_data, 549 u16 cnt, u16 tids, bool more_data,
550 bool agg); 550 bool single_sta_queue);
551int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 551int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
552 bool drain); 552 bool drain);
553void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, 553void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dd2b4a300819..1ba0a6f55503 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
34 * 34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
37 * All rights reserved. 38 * All rights reserved.
38 * 39 *
39 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -505,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
505 506
506 switch (info->control.vif->type) { 507 switch (info->control.vif->type) {
507 case NL80211_IFTYPE_AP: 508 case NL80211_IFTYPE_AP:
509 case NL80211_IFTYPE_ADHOC:
508 /* 510 /*
509 * Handle legacy hostapd as well, where station may be added 511 * Handle legacy hostapd as well, where station may be added
510 * only after assoc. Take care of the case where we send a 512 * only after assoc. Take care of the case where we send a
@@ -516,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
516 if (info->hw_queue == info->control.vif->cab_queue) 518 if (info->hw_queue == info->control.vif->cab_queue)
517 return info->hw_queue; 519 return info->hw_queue;
518 520
519 WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); 521 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
522 "fc=0x%02x", le16_to_cpu(fc));
520 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 523 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
521 case NL80211_IFTYPE_P2P_DEVICE: 524 case NL80211_IFTYPE_P2P_DEVICE:
522 if (ieee80211_is_mgmt(fc)) 525 if (ieee80211_is_mgmt(fc))
@@ -583,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
583 iwl_mvm_vif_from_mac80211(info.control.vif); 586 iwl_mvm_vif_from_mac80211(info.control.vif);
584 587
585 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 588 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
586 info.control.vif->type == NL80211_IFTYPE_AP) { 589 info.control.vif->type == NL80211_IFTYPE_AP ||
590 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
587 sta_id = mvmvif->bcast_sta.sta_id; 591 sta_id = mvmvif->bcast_sta.sta_id;
588 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, 592 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
589 hdr->frame_control); 593 hdr->frame_control);
@@ -628,8 +632,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
628 * values. 632 * values.
629 * Note that we don't need to make sure it isn't agg'd, since we're 633 * Note that we don't need to make sure it isn't agg'd, since we're
630 * TXing non-sta 634 * TXing non-sta
635 * For DQA mode - we shouldn't increase it though
631 */ 636 */
632 atomic_inc(&mvm->pending_frames[sta_id]); 637 if (!iwl_mvm_is_dqa_supported(mvm))
638 atomic_inc(&mvm->pending_frames[sta_id]);
633 639
634 return 0; 640 return 0;
635} 641}
@@ -1005,11 +1011,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1005 1011
1006 spin_unlock(&mvmsta->lock); 1012 spin_unlock(&mvmsta->lock);
1007 1013
1008 /* Increase pending frames count if this isn't AMPDU */ 1014 /* Increase pending frames count if this isn't AMPDU or DQA queue */
1009 if ((iwl_mvm_is_dqa_supported(mvm) && 1015 if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
1010 mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
1011 mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
1012 (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
1013 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); 1016 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
1014 1017
1015 return 0; 1018 return 0;
@@ -1079,12 +1082,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1079 lockdep_assert_held(&mvmsta->lock); 1082 lockdep_assert_held(&mvmsta->lock);
1080 1083
1081 if ((tid_data->state == IWL_AGG_ON || 1084 if ((tid_data->state == IWL_AGG_ON ||
1082 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && 1085 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
1086 iwl_mvm_is_dqa_supported(mvm)) &&
1083 iwl_mvm_tid_queued(tid_data) == 0) { 1087 iwl_mvm_tid_queued(tid_data) == 0) {
1084 /* 1088 /*
1085 * Now that this aggregation queue is empty tell mac80211 so it 1089 * Now that this aggregation or DQA queue is empty tell
1086 * knows we no longer have frames buffered for the station on 1090 * mac80211 so it knows we no longer have frames buffered for
1087 * this TID (for the TIM bitmap calculation.) 1091 * the station on this TID (for the TIM bitmap calculation.)
1088 */ 1092 */
1089 ieee80211_sta_set_buffered(sta, tid, false); 1093 ieee80211_sta_set_buffered(sta, tid, false);
1090 } 1094 }
@@ -1257,7 +1261,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1257 u8 skb_freed = 0; 1261 u8 skb_freed = 0;
1258 u16 next_reclaimed, seq_ctl; 1262 u16 next_reclaimed, seq_ctl;
1259 bool is_ndp = false; 1263 bool is_ndp = false;
1260 bool txq_agg = false; /* Is this TXQ aggregated */
1261 1264
1262 __skb_queue_head_init(&skbs); 1265 __skb_queue_head_init(&skbs);
1263 1266
@@ -1283,6 +1286,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1283 info->flags |= IEEE80211_TX_STAT_ACK; 1286 info->flags |= IEEE80211_TX_STAT_ACK;
1284 break; 1287 break;
1285 case TX_STATUS_FAIL_DEST_PS: 1288 case TX_STATUS_FAIL_DEST_PS:
1289 /* In DQA, the FW should have stopped the queue and not
1290 * return this status
1291 */
1292 WARN_ON(iwl_mvm_is_dqa_supported(mvm));
1286 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1293 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1287 break; 1294 break;
1288 default: 1295 default:
@@ -1387,15 +1394,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1387 bool send_eosp_ndp = false; 1394 bool send_eosp_ndp = false;
1388 1395
1389 spin_lock_bh(&mvmsta->lock); 1396 spin_lock_bh(&mvmsta->lock);
1390 if (iwl_mvm_is_dqa_supported(mvm)) {
1391 enum iwl_mvm_agg_state state;
1392
1393 state = mvmsta->tid_data[tid].state;
1394 txq_agg = (state == IWL_AGG_ON ||
1395 state == IWL_EMPTYING_HW_QUEUE_DELBA);
1396 } else {
1397 txq_agg = txq_id >= mvm->first_agg_queue;
1398 }
1399 1397
1400 if (!is_ndp) { 1398 if (!is_ndp) {
1401 tid_data->next_reclaimed = next_reclaimed; 1399 tid_data->next_reclaimed = next_reclaimed;
@@ -1452,11 +1450,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1452 * If the txq is not an AMPDU queue, there is no chance we freed 1450 * If the txq is not an AMPDU queue, there is no chance we freed
1453 * several skbs. Check that out... 1451 * several skbs. Check that out...
1454 */ 1452 */
1455 if (txq_agg) 1453 if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
1456 goto out; 1454 goto out;
1457 1455
1458 /* We can't free more than one frame at once on a shared queue */ 1456 /* We can't free more than one frame at once on a shared queue */
1459 WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); 1457 WARN_ON(skb_freed > 1);
1460 1458
1461 /* If we have still frames for this STA nothing to do here */ 1459 /* If we have still frames for this STA nothing to do here */
1462 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) 1460 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 5ebca1d0cfc7..b62e03d11c2e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
57 * In case of any errors during inittialization, this function also ensures 57 * In case of any errors during inittialization, this function also ensures
58 * proper cleanup before exiting. 58 * proper cleanup before exiting.
59 */ 59 */
60static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, 60static int mwifiex_register(void *card, struct device *dev,
61 void **padapter) 61 struct mwifiex_if_ops *if_ops, void **padapter)
62{ 62{
63 struct mwifiex_adapter *adapter; 63 struct mwifiex_adapter *adapter;
64 int i; 64 int i;
@@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
68 return -ENOMEM; 68 return -ENOMEM;
69 69
70 *padapter = adapter; 70 *padapter = adapter;
71 adapter->dev = dev;
71 adapter->card = card; 72 adapter->card = card;
72 73
73 /* Save interface specific operations in adapter */ 74 /* Save interface specific operations in adapter */
@@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
1568{ 1569{
1569 struct mwifiex_adapter *adapter; 1570 struct mwifiex_adapter *adapter;
1570 1571
1571 if (mwifiex_register(card, if_ops, (void **)&adapter)) { 1572 if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
1572 pr_err("%s: software init failed\n", __func__); 1573 pr_err("%s: software init failed\n", __func__);
1573 goto err_init_sw; 1574 goto err_init_sw;
1574 } 1575 }
1575 1576
1576 adapter->dev = dev;
1577 mwifiex_probe_of(adapter); 1577 mwifiex_probe_of(adapter);
1578 1578
1579 adapter->iface_type = iface_type; 1579 adapter->iface_type = iface_type;
@@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
1718 wiphy_unregister(adapter->wiphy); 1718 wiphy_unregister(adapter->wiphy);
1719 wiphy_free(adapter->wiphy); 1719 wiphy_free(adapter->wiphy);
1720 1720
1721 if (adapter->irq_wakeup >= 0)
1722 device_init_wakeup(adapter->dev, false);
1723
1721 /* Unregister device */ 1724 /* Unregister device */
1722 mwifiex_dbg(adapter, INFO, 1725 mwifiex_dbg(adapter, INFO,
1723 "info: unregister device\n"); 1726 "info: unregister device\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index a0d918094889..b8c990d10d6e 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
2739 schedule_work(&card->work); 2739 schedule_work(&card->work);
2740} 2740}
2741 2741
2742static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
2743{
2744 struct pcie_service_card *card = adapter->card;
2745 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
2746
2747 if (reg->sleep_cookie)
2748 mwifiex_pcie_delete_sleep_cookie_buf(adapter);
2749
2750 mwifiex_pcie_delete_cmdrsp_buf(adapter);
2751 mwifiex_pcie_delete_evtbd_ring(adapter);
2752 mwifiex_pcie_delete_rxbd_ring(adapter);
2753 mwifiex_pcie_delete_txbd_ring(adapter);
2754 card->cmdrsp_buf = NULL;
2755}
2756
2742/* 2757/*
2743 * This function initializes the PCI-E host memory space, WCB rings, etc. 2758 * This function initializes the PCI-E host memory space, WCB rings, etc.
2744 * 2759 *
@@ -2850,13 +2865,6 @@ err_enable_dev:
2850 2865
2851/* 2866/*
2852 * This function cleans up the allocated card buffers. 2867 * This function cleans up the allocated card buffers.
2853 *
2854 * The following are freed by this function -
2855 * - TXBD ring buffers
2856 * - RXBD ring buffers
2857 * - Event BD ring buffers
2858 * - Command response ring buffer
2859 * - Sleep cookie buffer
2860 */ 2868 */
2861static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) 2869static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
2862{ 2870{
@@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
2875 "Failed to write driver not-ready signature\n"); 2883 "Failed to write driver not-ready signature\n");
2876 } 2884 }
2877 2885
2886 mwifiex_pcie_free_buffers(adapter);
2887
2878 if (pdev) { 2888 if (pdev) {
2879 pci_iounmap(pdev, card->pci_mmap); 2889 pci_iounmap(pdev, card->pci_mmap);
2880 pci_iounmap(pdev, card->pci_mmap1); 2890 pci_iounmap(pdev, card->pci_mmap1);
@@ -3126,10 +3136,7 @@ err_cre_txbd:
3126 pci_iounmap(pdev, card->pci_mmap1); 3136 pci_iounmap(pdev, card->pci_mmap1);
3127} 3137}
3128 3138
3129/* This function cleans up the PCI-E host memory space. 3139/* This function cleans up the PCI-E host memory space. */
3130 * Some code is extracted from mwifiex_unregister_dev()
3131 *
3132 */
3133static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) 3140static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
3134{ 3141{
3135 struct pcie_service_card *card = adapter->card; 3142 struct pcie_service_card *card = adapter->card;
@@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
3140 3147
3141 adapter->seq_num = 0; 3148 adapter->seq_num = 0;
3142 3149
3143 if (reg->sleep_cookie) 3150 mwifiex_pcie_free_buffers(adapter);
3144 mwifiex_pcie_delete_sleep_cookie_buf(adapter);
3145
3146 mwifiex_pcie_delete_cmdrsp_buf(adapter);
3147 mwifiex_pcie_delete_evtbd_ring(adapter);
3148 mwifiex_pcie_delete_rxbd_ring(adapter);
3149 mwifiex_pcie_delete_txbd_ring(adapter);
3150 card->cmdrsp_buf = NULL;
3151} 3151}
3152 3152
3153static struct mwifiex_if_ops pcie_ops = { 3153static struct mwifiex_if_ops pcie_ops = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index caea350f05aa..bdc379178e87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
1742 unsigned long flags; 1742 unsigned long flags;
1743 struct rtl_c2hcmd *c2hcmd; 1743 struct rtl_c2hcmd *c2hcmd;
1744 1744
1745 c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL); 1745 c2hcmd = kmalloc(sizeof(*c2hcmd),
1746 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
1746 1747
1747 if (!c2hcmd) 1748 if (!c2hcmd)
1748 goto label_err; 1749 goto label_err;
1749 1750
1750 c2hcmd->val = kmalloc(len, GFP_KERNEL); 1751 c2hcmd->val = kmalloc(len,
1752 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
1751 1753
1752 if (!c2hcmd->val) 1754 if (!c2hcmd->val)
1753 goto label_err2; 1755 goto label_err2;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a1728cdf..351bac8f6503 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL); 934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
935 if (rc < 0) 935 if (rc < 0)
936 goto out_unlock; 936 goto out_unlock;
937 nvdimm_bus_unlock(&nvdimm_bus->dev);
938
937 if (copy_to_user(p, buf, buf_len)) 939 if (copy_to_user(p, buf, buf_len))
938 rc = -EFAULT; 940 rc = -EFAULT;
941
942 vfree(buf);
943 return rc;
944
939 out_unlock: 945 out_unlock:
940 nvdimm_bus_unlock(&nvdimm_bus->dev); 946 nvdimm_bus_unlock(&nvdimm_bus->dev);
941 out: 947 out:
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b3323c0697f6..ca6d572c48fc 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
243 } 243 }
244 244
245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
246 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { 246 /*
247 * FIXME: nsio_rw_bytes() may be called from atomic
248 * context in the btt case and nvdimm_clear_poison()
249 * takes a sleeping lock. Until the locking can be
250 * reworked this capability requires that the namespace
251 * is not claimed by btt.
252 */
253 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
254 && (!ndns->claim || !is_nd_btt(ndns->claim))) {
247 long cleared; 255 long cleared;
248 256
249 cleared = nvdimm_clear_poison(&ndns->dev, offset, size); 257 cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 0eedc49e0d47..8b721321be5b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
395 395
396int alias_dpa_busy(struct device *dev, void *data) 396int alias_dpa_busy(struct device *dev, void *data)
397{ 397{
398 resource_size_t map_end, blk_start, new, busy; 398 resource_size_t map_end, blk_start, new;
399 struct blk_alloc_info *info = data; 399 struct blk_alloc_info *info = data;
400 struct nd_mapping *nd_mapping; 400 struct nd_mapping *nd_mapping;
401 struct nd_region *nd_region; 401 struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
436 retry: 436 retry:
437 /* 437 /*
438 * Find the free dpa from the end of the last pmem allocation to 438 * Find the free dpa from the end of the last pmem allocation to
439 * the end of the interleave-set mapping that is not already 439 * the end of the interleave-set mapping.
440 * covered by a blk allocation.
441 */ 440 */
442 busy = 0;
443 for_each_dpa_resource(ndd, res) { 441 for_each_dpa_resource(ndd, res) {
442 if (strncmp(res->name, "pmem", 4) != 0)
443 continue;
444 if ((res->start >= blk_start && res->start < map_end) 444 if ((res->start >= blk_start && res->start < map_end)
445 || (res->end >= blk_start 445 || (res->end >= blk_start
446 && res->end <= map_end)) { 446 && res->end <= map_end)) {
447 if (strncmp(res->name, "pmem", 4) == 0) { 447 new = max(blk_start, min(map_end + 1, res->end + 1));
448 new = max(blk_start, min(map_end + 1, 448 if (new != blk_start) {
449 res->end + 1)); 449 blk_start = new;
450 if (new != blk_start) { 450 goto retry;
451 blk_start = new; 451 }
452 goto retry;
453 }
454 } else
455 busy += min(map_end, res->end)
456 - max(nd_mapping->start, res->start) + 1;
457 } else if (nd_mapping->start > res->start
458 && map_end < res->end) {
459 /* total eclipse of the PMEM region mapping */
460 busy += nd_mapping->size;
461 break;
462 } 452 }
463 } 453 }
464 454
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
470 return 1; 460 return 1;
471 } 461 }
472 462
473 info->available -= blk_start - nd_mapping->start + busy; 463 info->available -= blk_start - nd_mapping->start;
474 464
475 return 0; 465 return 0;
476} 466}
477 467
478static int blk_dpa_busy(struct device *dev, void *data)
479{
480 struct blk_alloc_info *info = data;
481 struct nd_mapping *nd_mapping;
482 struct nd_region *nd_region;
483 resource_size_t map_end;
484 int i;
485
486 if (!is_nd_pmem(dev))
487 return 0;
488
489 nd_region = to_nd_region(dev);
490 for (i = 0; i < nd_region->ndr_mappings; i++) {
491 nd_mapping = &nd_region->mapping[i];
492 if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
493 break;
494 }
495
496 if (i >= nd_region->ndr_mappings)
497 return 0;
498
499 map_end = nd_mapping->start + nd_mapping->size - 1;
500 if (info->res->start >= nd_mapping->start
501 && info->res->start < map_end) {
502 if (info->res->end <= map_end) {
503 info->busy = 0;
504 return 1;
505 } else {
506 info->busy -= info->res->end - map_end;
507 return 0;
508 }
509 } else if (info->res->end >= nd_mapping->start
510 && info->res->end <= map_end) {
511 info->busy -= nd_mapping->start - info->res->start;
512 return 0;
513 } else {
514 info->busy -= nd_mapping->size;
515 return 0;
516 }
517}
518
519/** 468/**
520 * nd_blk_available_dpa - account the unused dpa of BLK region 469 * nd_blk_available_dpa - account the unused dpa of BLK region
521 * @nd_mapping: container of dpa-resource-root + labels 470 * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
545 for_each_dpa_resource(ndd, res) { 494 for_each_dpa_resource(ndd, res) {
546 if (strncmp(res->name, "blk", 3) != 0) 495 if (strncmp(res->name, "blk", 3) != 0)
547 continue; 496 continue;
548 497 info.available -= resource_size(res);
549 info.res = res;
550 info.busy = resource_size(res);
551 device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
552 info.available -= info.busy;
553 } 498 }
554 499
555 return info.available; 500 return info.available;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446..eeb409c287b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
270 memset(cmnd, 0, sizeof(*cmnd)); 270 memset(cmnd, 0, sizeof(*cmnd));
271 cmnd->dsm.opcode = nvme_cmd_dsm; 271 cmnd->dsm.opcode = nvme_cmd_dsm;
272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); 272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
273 cmnd->dsm.nr = segments - 1; 273 cmnd->dsm.nr = cpu_to_le32(segments - 1);
274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
275 275
276 req->special_vec.bv_page = virt_to_page(range); 276 req->special_vec.bv_page = virt_to_page(range);
@@ -1316,6 +1316,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1316 table->entries[state] = target; 1316 table->entries[state] = target;
1317 1317
1318 /* 1318 /*
1319 * Don't allow transitions to the deepest state
1320 * if it's quirked off.
1321 */
1322 if (state == ctrl->npss &&
1323 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
1324 continue;
1325
1326 /*
1319 * Is this state a useful non-operational state for 1327 * Is this state a useful non-operational state for
1320 * higher-power states to autonomously transition to? 1328 * higher-power states to autonomously transition to?
1321 */ 1329 */
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
1387}; 1395};
1388 1396
1389static const struct nvme_core_quirk_entry core_quirks[] = { 1397static const struct nvme_core_quirk_entry core_quirks[] = {
1390 /*
1391 * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
1392 * the controller to go out to lunch. It dies when the watchdog
1393 * timer reads CSTS and gets 0xffffffff.
1394 */
1395 { 1398 {
1396 .vid = 0x144d, 1399 /*
1397 .fr = "BXW75D0Q", 1400 * This Toshiba device seems to die using any APST states. See:
1401 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
1402 */
1403 .vid = 0x1179,
1404 .mn = "THNSF5256GPUK TOSHIBA",
1398 .quirks = NVME_QUIRK_NO_APST, 1405 .quirks = NVME_QUIRK_NO_APST,
1399 }, 1406 }
1400}; 1407};
1401 1408
1402/* match is null-terminated but idstr is space-padded. */ 1409/* match is null-terminated but idstr is space-padded. */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9690beb15e69..d996ca73d3be 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
2023 } 2023 }
2024 2024
2025 ctrl->ctrl.sqsize = 2025 ctrl->ctrl.sqsize =
2026 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 2026 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
2027 2027
2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2029 if (error) 2029 if (error)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2aa20e3e5675..ab2d6ec7eb5c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -83,6 +83,11 @@ enum nvme_quirks {
83 * APST should not be used. 83 * APST should not be used.
84 */ 84 */
85 NVME_QUIRK_NO_APST = (1 << 4), 85 NVME_QUIRK_NO_APST = (1 << 4),
86
87 /*
88 * The deepest sleep state should not be used.
89 */
90 NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
86}; 91};
87 92
88/* 93/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 26a5fd05fe88..5d309535abbd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -19,6 +19,7 @@
19#include <linux/blk-mq-pci.h> 19#include <linux/blk-mq-pci.h>
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/dmi.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/fs.h> 24#include <linux/fs.h>
24#include <linux/genhd.h> 25#include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
1943 return -ENODEV; 1944 return -ENODEV;
1944} 1945}
1945 1946
1947static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
1948{
1949 if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
1950 /*
1951 * Several Samsung devices seem to drop off the PCIe bus
1952 * randomly when APST is on and uses the deepest sleep state.
1953 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
1954 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
1955 * 950 PRO 256GB", but it seems to be restricted to two Dell
1956 * laptops.
1957 */
1958 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
1959 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
1960 dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
1961 return NVME_QUIRK_NO_DEEPEST_PS;
1962 }
1963
1964 return 0;
1965}
1966
1946static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1967static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1947{ 1968{
1948 int node, result = -ENOMEM; 1969 int node, result = -ENOMEM;
1949 struct nvme_dev *dev; 1970 struct nvme_dev *dev;
1971 unsigned long quirks = id->driver_data;
1950 1972
1951 node = dev_to_node(&pdev->dev); 1973 node = dev_to_node(&pdev->dev);
1952 if (node == NUMA_NO_NODE) 1974 if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1978 if (result) 2000 if (result)
1979 goto put_pci; 2001 goto put_pci;
1980 2002
2003 quirks |= check_dell_samsung_bug(pdev);
2004
1981 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2005 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
1982 id->driver_data); 2006 quirks);
1983 if (result) 2007 if (result)
1984 goto release_pools; 2008 goto release_pools;
1985 2009
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 779f516e7a4e..16f84eb0b95e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
343 struct ib_device *ibdev = dev->dev; 343 struct ib_device *ibdev = dev->dev;
344 int ret; 344 int ret;
345 345
346 BUG_ON(queue_idx >= ctrl->queue_count);
347
348 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), 346 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
349 DMA_TO_DEVICE); 347 DMA_TO_DEVICE);
350 if (ret) 348 if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
652 650
653static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) 651static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
654{ 652{
653 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
654 unsigned int nr_io_queues;
655 int i, ret; 655 int i, ret;
656 656
657 nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
658 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
659 if (ret)
660 return ret;
661
662 ctrl->queue_count = nr_io_queues + 1;
663 if (ctrl->queue_count < 2)
664 return 0;
665
666 dev_info(ctrl->ctrl.device,
667 "creating %d I/O queues.\n", nr_io_queues);
668
657 for (i = 1; i < ctrl->queue_count; i++) { 669 for (i = 1; i < ctrl->queue_count; i++) {
658 ret = nvme_rdma_init_queue(ctrl, i, 670 ret = nvme_rdma_init_queue(ctrl, i,
659 ctrl->ctrl.opts->queue_size); 671 ctrl->ctrl.opts->queue_size);
@@ -1594,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
1594 } 1606 }
1595 1607
1596 ctrl->ctrl.sqsize = 1608 ctrl->ctrl.sqsize =
1597 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 1609 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
1598 1610
1599 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
1600 if (error) 1612 if (error)
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
1791 1803
1792static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) 1804static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
1793{ 1805{
1794 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
1795 int ret; 1806 int ret;
1796 1807
1797 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
1798 if (ret)
1799 return ret;
1800
1801 ctrl->queue_count = opts->nr_io_queues + 1;
1802 if (ctrl->queue_count < 2)
1803 return 0;
1804
1805 dev_info(ctrl->ctrl.device,
1806 "creating %d I/O queues.\n", opts->nr_io_queues);
1807
1808 ret = nvme_rdma_init_io_queues(ctrl); 1808 ret = nvme_rdma_init_io_queues(ctrl);
1809 if (ret) 1809 if (ret)
1810 return ret; 1810 return ret;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a7bcff45f437..76450b0c55f1 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
100 u16 status; 100 u16 status;
101 101
102 WARN_ON(req == NULL || slog == NULL); 102 WARN_ON(req == NULL || slog == NULL);
103 if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) 103 if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
104 status = nvmet_get_smart_log_all(req, slog); 104 status = nvmet_get_smart_log_all(req, slog);
105 else 105 else
106 status = nvmet_get_smart_log_nsid(req, slog); 106 status = nvmet_get_smart_log_nsid(req, slog);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 11b0a0a5f661..798653b329b2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
425 ctrl->sqs[qid] = sq; 425 ctrl->sqs[qid] = sq;
426} 426}
427 427
428static void nvmet_confirm_sq(struct percpu_ref *ref)
429{
430 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
431
432 complete(&sq->confirm_done);
433}
434
428void nvmet_sq_destroy(struct nvmet_sq *sq) 435void nvmet_sq_destroy(struct nvmet_sq *sq)
429{ 436{
430 /* 437 /*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
433 */ 440 */
434 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) 441 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
435 nvmet_async_events_free(sq->ctrl); 442 nvmet_async_events_free(sq->ctrl);
436 percpu_ref_kill(&sq->ref); 443 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
444 wait_for_completion(&sq->confirm_done);
437 wait_for_completion(&sq->free_done); 445 wait_for_completion(&sq->free_done);
438 percpu_ref_exit(&sq->ref); 446 percpu_ref_exit(&sq->ref);
439 447
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
461 return ret; 469 return ret;
462 } 470 }
463 init_completion(&sq->free_done); 471 init_completion(&sq->free_done);
472 init_completion(&sq->confirm_done);
464 473
465 return 0; 474 return 0;
466} 475}
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..6b0baa9caab9 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
180 180
181 sector = le64_to_cpu(write_zeroes->slba) << 181 sector = le64_to_cpu(write_zeroes->slba) <<
182 (req->ns->blksize_shift - 9); 182 (req->ns->blksize_shift - 9);
183 nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << 183 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
184 (req->ns->blksize_shift - 9)) + 1; 184 (req->ns->blksize_shift - 9)) + 1;
185 185
186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, 186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
230 return 0; 230 return 0;
231 case nvme_cmd_dsm: 231 case nvme_cmd_dsm:
232 req->execute = nvmet_execute_dsm; 232 req->execute = nvmet_execute_dsm;
233 req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * 233 req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
234 sizeof(struct nvme_dsm_range); 234 sizeof(struct nvme_dsm_range);
235 return 0; 235 return 0;
236 case nvme_cmd_write_zeroes: 236 case nvme_cmd_write_zeroes:
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7768ff..c7b0b6a52708 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, 223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
224 struct nvme_loop_iod *iod, unsigned int queue_idx) 224 struct nvme_loop_iod *iod, unsigned int queue_idx)
225{ 225{
226 BUG_ON(queue_idx >= ctrl->queue_count);
227
228 iod->req.cmd = &iod->cmd; 226 iod->req.cmd = &iod->cmd;
229 iod->req.rsp = &iod->rsp; 227 iod->req.rsp = &iod->rsp;
230 iod->queue = &ctrl->queues[queue_idx]; 228 iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
288 286
289static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) 287static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
290{ 288{
289 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
291 blk_cleanup_queue(ctrl->ctrl.admin_q); 290 blk_cleanup_queue(ctrl->ctrl.admin_q);
292 blk_mq_free_tag_set(&ctrl->admin_tag_set); 291 blk_mq_free_tag_set(&ctrl->admin_tag_set);
293 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
294} 292}
295 293
296static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) 294static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
314 kfree(ctrl); 312 kfree(ctrl);
315} 313}
316 314
315static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
316{
317 int i;
318
319 for (i = 1; i < ctrl->queue_count; i++)
320 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
321}
322
323static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
324{
325 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
326 unsigned int nr_io_queues;
327 int ret, i;
328
329 nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
330 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
331 if (ret || !nr_io_queues)
332 return ret;
333
334 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
335
336 for (i = 1; i <= nr_io_queues; i++) {
337 ctrl->queues[i].ctrl = ctrl;
338 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
339 if (ret)
340 goto out_destroy_queues;
341
342 ctrl->queue_count++;
343 }
344
345 return 0;
346
347out_destroy_queues:
348 nvme_loop_destroy_io_queues(ctrl);
349 return ret;
350}
351
317static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) 352static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
318{ 353{
319 int error; 354 int error;
@@ -357,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
357 } 392 }
358 393
359 ctrl->ctrl.sqsize = 394 ctrl->ctrl.sqsize =
360 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 395 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
361 396
362 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
363 if (error) 398 if (error)
@@ -385,17 +420,13 @@ out_free_sq:
385 420
386static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) 421static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
387{ 422{
388 int i;
389
390 nvme_stop_keep_alive(&ctrl->ctrl); 423 nvme_stop_keep_alive(&ctrl->ctrl);
391 424
392 if (ctrl->queue_count > 1) { 425 if (ctrl->queue_count > 1) {
393 nvme_stop_queues(&ctrl->ctrl); 426 nvme_stop_queues(&ctrl->ctrl);
394 blk_mq_tagset_busy_iter(&ctrl->tag_set, 427 blk_mq_tagset_busy_iter(&ctrl->tag_set,
395 nvme_cancel_request, &ctrl->ctrl); 428 nvme_cancel_request, &ctrl->ctrl);
396 429 nvme_loop_destroy_io_queues(ctrl);
397 for (i = 1; i < ctrl->queue_count; i++)
398 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
399 } 430 }
400 431
401 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 432 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
467 if (ret) 498 if (ret)
468 goto out_disable; 499 goto out_disable;
469 500
470 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 501 ret = nvme_loop_init_io_queues(ctrl);
471 ctrl->queues[i].ctrl = ctrl; 502 if (ret)
472 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 503 goto out_destroy_admin;
473 if (ret)
474 goto out_free_queues;
475
476 ctrl->queue_count++;
477 }
478 504
479 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 505 for (i = 1; i < ctrl->queue_count; i++) {
480 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 506 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
481 if (ret) 507 if (ret)
482 goto out_free_queues; 508 goto out_destroy_io;
483 } 509 }
484 510
485 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 511 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
492 518
493 return; 519 return;
494 520
495out_free_queues: 521out_destroy_io:
496 for (i = 1; i < ctrl->queue_count; i++) 522 nvme_loop_destroy_io_queues(ctrl);
497 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 523out_destroy_admin:
498 nvme_loop_destroy_admin_queue(ctrl); 524 nvme_loop_destroy_admin_queue(ctrl);
499out_disable: 525out_disable:
500 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 526 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
533 559
534static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 560static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
535{ 561{
536 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
537 int ret, i; 562 int ret, i;
538 563
539 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); 564 ret = nvme_loop_init_io_queues(ctrl);
540 if (ret || !opts->nr_io_queues) 565 if (ret)
541 return ret; 566 return ret;
542 567
543 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
544 opts->nr_io_queues);
545
546 for (i = 1; i <= opts->nr_io_queues; i++) {
547 ctrl->queues[i].ctrl = ctrl;
548 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
549 if (ret)
550 goto out_destroy_queues;
551
552 ctrl->queue_count++;
553 }
554
555 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 568 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
556 ctrl->tag_set.ops = &nvme_loop_mq_ops; 569 ctrl->tag_set.ops = &nvme_loop_mq_ops;
557 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 570 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
575 goto out_free_tagset; 588 goto out_free_tagset;
576 } 589 }
577 590
578 for (i = 1; i <= opts->nr_io_queues; i++) { 591 for (i = 1; i < ctrl->queue_count; i++) {
579 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 592 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
580 if (ret) 593 if (ret)
581 goto out_cleanup_connect_q; 594 goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
588out_free_tagset: 601out_free_tagset:
589 blk_mq_free_tag_set(&ctrl->tag_set); 602 blk_mq_free_tag_set(&ctrl->tag_set);
590out_destroy_queues: 603out_destroy_queues:
591 for (i = 1; i < ctrl->queue_count; i++) 604 nvme_loop_destroy_io_queues(ctrl);
592 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
593 return ret; 605 return ret;
594} 606}
595 607
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1370eee0a3c0..f7ff15f17ca9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
73 u16 qid; 73 u16 qid;
74 u16 size; 74 u16 size;
75 struct completion free_done; 75 struct completion free_done;
76 struct completion confirm_done;
76}; 77};
77 78
78/** 79/**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9aa1da3778b3..ecc4fe862561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
703{ 703{
704 u16 status; 704 u16 status;
705 705
706 cmd->queue = queue;
707 cmd->n_rdma = 0;
708 cmd->req.port = queue->port;
709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device, 706 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, 707 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE); 708 DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
760 755
761 cmd->queue = queue; 756 cmd->queue = queue;
762 rsp = nvmet_rdma_get_rsp(queue); 757 rsp = nvmet_rdma_get_rsp(queue);
758 rsp->queue = queue;
763 rsp->cmd = cmd; 759 rsp->cmd = cmd;
764 rsp->flags = 0; 760 rsp->flags = 0;
765 rsp->req.cmd = cmd->nvme_cmd; 761 rsp->req.cmd = cmd->nvme_cmd;
762 rsp->req.port = queue->port;
763 rsp->n_rdma = 0;
766 764
767 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { 765 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
768 unsigned long flags; 766 unsigned long flags;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index bc090daa850a..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
939 * pardevice fields. -arca 939 * pardevice fields. -arca
940 */ 940 */
941 port->ops->init_state(par_dev, par_dev->state); 941 port->ops->init_state(par_dev, par_dev->state);
942 port->proc_device = par_dev; 942 if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
943 parport_device_proc_register(par_dev); 943 port->proc_device = par_dev;
944 parport_device_proc_register(par_dev);
945 }
944 946
945 return par_dev; 947 return par_dev;
946 948
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index dfb8a69afc28..d2d2ba5b8a68 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -89,6 +89,7 @@ config PCI_HISI
89 depends on PCI_MSI_IRQ_DOMAIN 89 depends on PCI_MSI_IRQ_DOMAIN
90 select PCIEPORTBUS 90 select PCIEPORTBUS
91 select PCIE_DW_HOST 91 select PCIE_DW_HOST
92 select PCI_HOST_COMMON
92 help 93 help
93 Say Y here if you want PCIe controller support on HiSilicon 94 Say Y here if you want PCIe controller support on HiSilicon
94 Hip05 and Hip06 SoCs 95 Hip05 and Hip06 SoCs
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index fcd3ef845883..6d23683c0892 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
234 return 0; 234 return 0;
235} 235}
236 236
237static const struct dw_pcie_ops dw_pcie_ops = {
238};
239
237static int artpec6_pcie_probe(struct platform_device *pdev) 240static int artpec6_pcie_probe(struct platform_device *pdev)
238{ 241{
239 struct device *dev = &pdev->dev; 242 struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
252 return -ENOMEM; 255 return -ENOMEM;
253 256
254 pci->dev = dev; 257 pci->dev = dev;
258 pci->ops = &dw_pcie_ops;
255 259
256 artpec6_pcie->pci = pci; 260 artpec6_pcie->pci = pci;
257 261
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index b6c832ba39dd..f20d494922ab 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
86 return 0; 86 return 0;
87} 87}
88 88
89static const struct dw_pcie_ops dw_pcie_ops = {
90};
91
89static int dw_plat_pcie_probe(struct platform_device *pdev) 92static int dw_plat_pcie_probe(struct platform_device *pdev)
90{ 93{
91 struct device *dev = &pdev->dev; 94 struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
103 return -ENOMEM; 106 return -ENOMEM;
104 107
105 pci->dev = dev; 108 pci->dev = dev;
109 pci->ops = &dw_pcie_ops;
106 110
107 dw_plat_pcie->pci = pci; 111 dw_plat_pcie->pci = pci;
108 112
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index fd66a3199db7..cf9d6a9d9fd4 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
380 380
381static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { 381static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
382 { 382 {
383 .compatible = "hisilicon,pcie-almost-ecam", 383 .compatible = "hisilicon,hip06-pcie-ecam",
384 .data = (void *) &hisi_pcie_platform_ops, 384 .data = (void *) &hisi_pcie_platform_ops,
385 }, 385 },
386 {
387 .compatible = "hisilicon,hip07-pcie-ecam",
388 .data = (void *) &hisi_pcie_platform_ops,
389 },
386 {}, 390 {},
387}; 391};
388 392
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 52b5bdccf5f0..6e031b522529 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 - 2016 Cavium, Inc. 14 * Copyright (C) 2015 - 2016 Cavium, Inc.
15 */ 15 */
16 16
17#include <linux/bitfield.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
@@ -334,6 +335,49 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
334 335
335#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 336#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
336 337
338#define PEM_RES_BASE 0x87e0c0000000UL
339#define PEM_NODE_MASK GENMASK(45, 44)
340#define PEM_INDX_MASK GENMASK(26, 24)
341#define PEM_MIN_DOM_IN_NODE 4
342#define PEM_MAX_DOM_IN_NODE 10
343
344static void thunder_pem_reserve_range(struct device *dev, int seg,
345 struct resource *r)
346{
347 resource_size_t start = r->start, end = r->end;
348 struct resource *res;
349 const char *regionid;
350
351 regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
352 if (!regionid)
353 return;
354
355 res = request_mem_region(start, end - start + 1, regionid);
356 if (res)
357 res->flags &= ~IORESOURCE_BUSY;
358 else
359 kfree(regionid);
360
361 dev_info(dev, "%pR %s reserved\n", r,
362 res ? "has been" : "could not be");
363}
364
365static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
366 struct resource *res_pem)
367{
368 int node = acpi_get_node(root->device->handle);
369 int index;
370
371 if (node == NUMA_NO_NODE)
372 node = 0;
373
374 index = root->segment - PEM_MIN_DOM_IN_NODE;
375 index -= node * PEM_MAX_DOM_IN_NODE;
376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
377 FIELD_PREP(PEM_INDX_MASK, index);
378 res_pem->flags = IORESOURCE_MEM;
379}
380
337static int thunder_pem_acpi_init(struct pci_config_window *cfg) 381static int thunder_pem_acpi_init(struct pci_config_window *cfg)
338{ 382{
339 struct device *dev = cfg->parent; 383 struct device *dev = cfg->parent;
@@ -346,10 +390,24 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
346 if (!res_pem) 390 if (!res_pem)
347 return -ENOMEM; 391 return -ENOMEM;
348 392
349 ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); 393 ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
394
395 /*
396 * If we fail to gather resources it means that we run with old
397 * FW where we need to calculate PEM-specific resources manually.
398 */
350 if (ret) { 399 if (ret) {
351 dev_err(dev, "can't get rc base address\n"); 400 thunder_pem_legacy_fw(root, res_pem);
352 return ret; 401 /*
402 * Reserve 64K size PEM specific resources. The full 16M range
403 * size is required for thunder_pem_init() call.
404 */
405 res_pem->end = res_pem->start + SZ_64K - 1;
406 thunder_pem_reserve_range(dev, root->segment, res_pem);
407 res_pem->end = res_pem->start + SZ_16M - 1;
408
409 /* Reserve PCI configuration space as well. */
410 thunder_pem_reserve_range(dev, root->segment, &cfg->res);
353 } 411 }
354 412
355 return thunder_pem_init(dev, cfg, res_pem); 413 return thunder_pem_init(dev, cfg, res_pem);
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index bd4c9ec25edc..384c27e664fe 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
44{ 44{
45 struct device *dev = &bdev->dev; 45 struct device *dev = &bdev->dev;
46 struct iproc_pcie *pcie; 46 struct iproc_pcie *pcie;
47 LIST_HEAD(res); 47 LIST_HEAD(resources);
48 struct resource res_mem;
49 int ret; 48 int ret;
50 49
51 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 50 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
63 62
64 pcie->base_addr = bdev->addr; 63 pcie->base_addr = bdev->addr;
65 64
66 res_mem.start = bdev->addr_s[0]; 65 pcie->mem.start = bdev->addr_s[0];
67 res_mem.end = bdev->addr_s[0] + SZ_128M - 1; 66 pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
68 res_mem.name = "PCIe MEM space"; 67 pcie->mem.name = "PCIe MEM space";
69 res_mem.flags = IORESOURCE_MEM; 68 pcie->mem.flags = IORESOURCE_MEM;
70 pci_add_resource(&res, &res_mem); 69 pci_add_resource(&resources, &pcie->mem);
71 70
72 pcie->map_irq = iproc_pcie_bcma_map_irq; 71 pcie->map_irq = iproc_pcie_bcma_map_irq;
73 72
74 ret = iproc_pcie_setup(pcie, &res); 73 ret = iproc_pcie_setup(pcie, &resources);
75 if (ret) 74 if (ret) {
76 dev_err(dev, "PCIe controller setup failed\n"); 75 dev_err(dev, "PCIe controller setup failed\n");
77 76 pci_free_resource_list(&resources);
78 pci_free_resource_list(&res); 77 return ret;
78 }
79 79
80 bcma_set_drvdata(bdev, pcie); 80 bcma_set_drvdata(bdev, pcie);
81 return ret; 81 return 0;
82} 82}
83 83
84static void iproc_pcie_bcma_remove(struct bcma_device *bdev) 84static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index f4909bb0b2ad..8c6a327ca6cd 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
51 struct device_node *np = dev->of_node; 51 struct device_node *np = dev->of_node;
52 struct resource reg; 52 struct resource reg;
53 resource_size_t iobase = 0; 53 resource_size_t iobase = 0;
54 LIST_HEAD(res); 54 LIST_HEAD(resources);
55 int ret; 55 int ret;
56 56
57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
96 pcie->phy = NULL; 96 pcie->phy = NULL;
97 } 97 }
98 98
99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); 99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
100 &iobase);
100 if (ret) { 101 if (ret) {
101 dev_err(dev, 102 dev_err(dev, "unable to get PCI host bridge resources\n");
102 "unable to get PCI host bridge resources\n");
103 return ret; 103 return ret;
104 } 104 }
105 105
@@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
112 pcie->map_irq = of_irq_parse_and_map_pci; 112 pcie->map_irq = of_irq_parse_and_map_pci;
113 } 113 }
114 114
115 ret = iproc_pcie_setup(pcie, &res); 115 ret = iproc_pcie_setup(pcie, &resources);
116 if (ret) 116 if (ret) {
117 dev_err(dev, "PCIe controller setup failed\n"); 117 dev_err(dev, "PCIe controller setup failed\n");
118 118 pci_free_resource_list(&resources);
119 pci_free_resource_list(&res); 119 return ret;
120 }
120 121
121 platform_set_drvdata(pdev, pcie); 122 platform_set_drvdata(pdev, pcie);
122 return ret; 123 return 0;
123} 124}
124 125
125static int iproc_pcie_pltfm_remove(struct platform_device *pdev) 126static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 04fed8e907f1..0bbe2ea44f3e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -90,6 +90,7 @@ struct iproc_pcie {
90#ifdef CONFIG_ARM 90#ifdef CONFIG_ARM
91 struct pci_sys_data sysdata; 91 struct pci_sys_data sysdata;
92#endif 92#endif
93 struct resource mem;
93 struct pci_bus *root_bus; 94 struct pci_bus *root_bus;
94 struct phy *phy; 95 struct phy *phy;
95 int (*map_irq)(const struct pci_dev *, u8, u8); 96 int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dc5277ad1b5a..005cadb7a3f8 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -449,6 +449,7 @@ config PHY_QCOM_UFS
449config PHY_QCOM_USB_HS 449config PHY_QCOM_USB_HS
450 tristate "Qualcomm USB HS PHY module" 450 tristate "Qualcomm USB HS PHY module"
451 depends on USB_ULPI_BUS 451 depends on USB_ULPI_BUS
452 depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
452 select GENERIC_PHY 453 select GENERIC_PHY
453 help 454 help
454 Support for the USB high-speed ULPI compliant phy on Qualcomm 455 Support for the USB high-speed ULPI compliant phy on Qualcomm
@@ -510,12 +511,4 @@ config PHY_MESON8B_USB2
510 and GXBB SoCs. 511 and GXBB SoCs.
511 If unsure, say N. 512 If unsure, say N.
512 513
513config PHY_NSP_USB3
514 tristate "Broadcom NorthStar plus USB3 PHY driver"
515 depends on OF && (ARCH_BCM_NSP || COMPILE_TEST)
516 select GENERIC_PHY
517 default ARCH_BCM_NSP
518 help
519 Enable this to support the Broadcom Northstar plus USB3 PHY.
520 If unsure, say N.
521endmenu 514endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e7b0feb1e125..dd8f3b5d2918 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
62obj-$(CONFIG_ARCH_TEGRA) += tegra/ 62obj-$(CONFIG_ARCH_TEGRA) += tegra/
63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o 63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o 64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
65obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c
deleted file mode 100644
index 49024eaa5545..000000000000
--- a/drivers/phy/phy-bcm-nsp-usb3.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (C) 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/mfd/syscon.h>
18#include <linux/mdio.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/phy/phy.h>
23#include <linux/regmap.h>
24
25#define NSP_USB3_RST_CTRL_OFFSET 0x3f8
26
27/* mdio reg access */
28#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f
29
30#define NSP_USB3_PHY_PLL30_BLOCK 0x8000
31#define NSP_USB3_PLL_CONTROL 0x01
32#define NSP_USB3_PLLA_CONTROL0 0x0a
33#define NSP_USB3_PLLA_CONTROL1 0x0b
34
35#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040
36#define NSP_USB3_TX_PMD_CONTROL1 0x01
37
38#define NSP_USB3_PHY_PIPE_BLOCK 0x8060
39#define NSP_USB3_LFPS_CMP 0x02
40#define NSP_USB3_LFPS_DEGLITCH 0x03
41
42struct nsp_usb3_phy {
43 struct regmap *usb3_ctrl;
44 struct phy *phy;
45 struct mdio_device *mdiodev;
46};
47
48static int nsp_usb3_phy_init(struct phy *phy)
49{
50 struct nsp_usb3_phy *iphy = phy_get_drvdata(phy);
51 struct mii_bus *bus = iphy->mdiodev->bus;
52 int addr = iphy->mdiodev->addr;
53 u32 data;
54 int rc;
55
56 rc = regmap_read(iphy->usb3_ctrl, 0, &data);
57 if (rc)
58 return rc;
59 data |= 1;
60 rc = regmap_write(iphy->usb3_ctrl, 0, data);
61 if (rc)
62 return rc;
63
64 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1);
65 if (rc)
66 return rc;
67
68 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
69 NSP_USB3_PHY_PLL30_BLOCK);
70 if (rc)
71 return rc;
72
73 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000);
74 if (rc)
75 return rc;
76
77 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400);
78 if (rc)
79 return rc;
80
81 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000);
82 if (rc)
83 return rc;
84
85 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000);
86 if (rc)
87 return rc;
88
89 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0);
90 if (rc)
91 return rc;
92
93 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000);
94 if (rc)
95 return rc;
96
97 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
98 NSP_USB3_PHY_PIPE_BLOCK);
99 if (rc)
100 return rc;
101
102 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d);
103 if (rc)
104 return rc;
105
106 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302);
107 if (rc)
108 return rc;
109
110 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
111 NSP_USB3_PHY_TX_PMD_BLOCK);
112 if (rc)
113 return rc;
114
115 rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003);
116
117 return rc;
118}
119
120static struct phy_ops nsp_usb3_phy_ops = {
121 .init = nsp_usb3_phy_init,
122 .owner = THIS_MODULE,
123};
124
125static int nsp_usb3_phy_probe(struct mdio_device *mdiodev)
126{
127 struct device *dev = &mdiodev->dev;
128 struct phy_provider *provider;
129 struct nsp_usb3_phy *iphy;
130
131 iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL);
132 if (!iphy)
133 return -ENOMEM;
134 iphy->mdiodev = mdiodev;
135
136 iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
137 "usb3-ctrl-syscon");
138 if (IS_ERR(iphy->usb3_ctrl))
139 return PTR_ERR(iphy->usb3_ctrl);
140
141 iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops);
142 if (IS_ERR(iphy->phy)) {
143 dev_err(dev, "failed to create PHY\n");
144 return PTR_ERR(iphy->phy);
145 }
146
147 phy_set_drvdata(iphy->phy, iphy);
148
149 provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
150 if (IS_ERR(provider)) {
151 dev_err(dev, "could not register PHY provider\n");
152 return PTR_ERR(provider);
153 }
154
155 return 0;
156}
157
158static const struct of_device_id nsp_usb3_phy_of_match[] = {
159 {.compatible = "brcm,nsp-usb3-phy",},
160 { /* sentinel */ }
161};
162
163static struct mdio_driver nsp_usb3_phy_driver = {
164 .mdiodrv = {
165 .driver = {
166 .name = "nsp-usb3-phy",
167 .of_match_table = nsp_usb3_phy_of_match,
168 },
169 },
170 .probe = nsp_usb3_phy_probe,
171};
172
173mdio_module_driver(nsp_usb3_phy_driver);
174
175MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver");
176MODULE_LICENSE("GPL v2");
177MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com");
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
index 4f60b83641d5..60baf25d98e2 100644
--- a/drivers/phy/phy-exynos-pcie.c
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
254 254
255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
256 exynos_phy->blk_base = devm_ioremap_resource(dev, res); 256 exynos_phy->blk_base = devm_ioremap_resource(dev, res);
257 if (IS_ERR(exynos_phy->phy_base)) 257 if (IS_ERR(exynos_phy->blk_base))
258 return PTR_ERR(exynos_phy->phy_base); 258 return PTR_ERR(exynos_phy->blk_base);
259 259
260 exynos_phy->drv_data = drv_data; 260 exynos_phy->drv_data = drv_data;
261 261
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index d69046537b75..32822b0d9cd0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2010,29 +2010,57 @@ out_err:
2010 return ERR_PTR(ret); 2010 return ERR_PTR(ret);
2011} 2011}
2012 2012
2013static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) 2013static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
2014{ 2014{
2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev); 2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev);
2016 if (!IS_ERR(pctldev->p)) { 2016 if (PTR_ERR(pctldev->p) == -ENODEV) {
2017 kref_get(&pctldev->p->users); 2017 dev_dbg(pctldev->dev, "no hogs found\n");
2018 pctldev->hog_default =
2019 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2020 if (IS_ERR(pctldev->hog_default)) {
2021 dev_dbg(pctldev->dev,
2022 "failed to lookup the default state\n");
2023 } else {
2024 if (pinctrl_select_state(pctldev->p,
2025 pctldev->hog_default))
2026 dev_err(pctldev->dev,
2027 "failed to select default state\n");
2028 }
2029 2018
2030 pctldev->hog_sleep = 2019 return 0;
2031 pinctrl_lookup_state(pctldev->p, 2020 }
2032 PINCTRL_STATE_SLEEP); 2021
2033 if (IS_ERR(pctldev->hog_sleep)) 2022 if (IS_ERR(pctldev->p)) {
2034 dev_dbg(pctldev->dev, 2023 dev_err(pctldev->dev, "error claiming hogs: %li\n",
2035 "failed to lookup the sleep state\n"); 2024 PTR_ERR(pctldev->p));
2025
2026 return PTR_ERR(pctldev->p);
2027 }
2028
2029 kref_get(&pctldev->p->users);
2030 pctldev->hog_default =
2031 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2032 if (IS_ERR(pctldev->hog_default)) {
2033 dev_dbg(pctldev->dev,
2034 "failed to lookup the default state\n");
2035 } else {
2036 if (pinctrl_select_state(pctldev->p,
2037 pctldev->hog_default))
2038 dev_err(pctldev->dev,
2039 "failed to select default state\n");
2040 }
2041
2042 pctldev->hog_sleep =
2043 pinctrl_lookup_state(pctldev->p,
2044 PINCTRL_STATE_SLEEP);
2045 if (IS_ERR(pctldev->hog_sleep))
2046 dev_dbg(pctldev->dev,
2047 "failed to lookup the sleep state\n");
2048
2049 return 0;
2050}
2051
2052int pinctrl_enable(struct pinctrl_dev *pctldev)
2053{
2054 int error;
2055
2056 error = pinctrl_claim_hogs(pctldev);
2057 if (error) {
2058 dev_err(pctldev->dev, "could not claim hogs: %i\n",
2059 error);
2060 mutex_destroy(&pctldev->mutex);
2061 kfree(pctldev);
2062
2063 return error;
2036 } 2064 }
2037 2065
2038 mutex_lock(&pinctrldev_list_mutex); 2066 mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
2043 2071
2044 return 0; 2072 return 0;
2045} 2073}
2074EXPORT_SYMBOL_GPL(pinctrl_enable);
2046 2075
2047/** 2076/**
2048 * pinctrl_register() - register a pin controller device 2077 * pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
2065 if (IS_ERR(pctldev)) 2094 if (IS_ERR(pctldev))
2066 return pctldev; 2095 return pctldev;
2067 2096
2068 error = pinctrl_create_and_start(pctldev); 2097 error = pinctrl_enable(pctldev);
2069 if (error) { 2098 if (error)
2070 mutex_destroy(&pctldev->mutex);
2071 kfree(pctldev);
2072
2073 return ERR_PTR(error); 2099 return ERR_PTR(error);
2074 }
2075 2100
2076 return pctldev; 2101 return pctldev;
2077 2102
2078} 2103}
2079EXPORT_SYMBOL_GPL(pinctrl_register); 2104EXPORT_SYMBOL_GPL(pinctrl_register);
2080 2105
2106/**
2107 * pinctrl_register_and_init() - register and init pin controller device
2108 * @pctldesc: descriptor for this pin controller
2109 * @dev: parent device for this pin controller
2110 * @driver_data: private pin controller data for this pin controller
2111 * @pctldev: pin controller device
2112 *
2113 * Note that pinctrl_enable() still needs to be manually called after
2114 * this once the driver is ready.
2115 */
2081int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 2116int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2082 struct device *dev, void *driver_data, 2117 struct device *dev, void *driver_data,
2083 struct pinctrl_dev **pctldev) 2118 struct pinctrl_dev **pctldev)
2084{ 2119{
2085 struct pinctrl_dev *p; 2120 struct pinctrl_dev *p;
2086 int error;
2087 2121
2088 p = pinctrl_init_controller(pctldesc, dev, driver_data); 2122 p = pinctrl_init_controller(pctldesc, dev, driver_data);
2089 if (IS_ERR(p)) 2123 if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2097 */ 2131 */
2098 *pctldev = p; 2132 *pctldev = p;
2099 2133
2100 error = pinctrl_create_and_start(p);
2101 if (error) {
2102 mutex_destroy(&p->mutex);
2103 kfree(p);
2104 *pctldev = NULL;
2105
2106 return error;
2107 }
2108
2109 return 0; 2134 return 0;
2110} 2135}
2111EXPORT_SYMBOL_GPL(pinctrl_register_and_init); 2136EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a7ace9e1ad81..74bd90dfd7b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
790 790
791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); 791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
792 792
793 return 0; 793 return pinctrl_enable(ipctl->pctl);
794 794
795free: 795free:
796 imx_free_resources(ipctl); 796 imx_free_resources(ipctl);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f80134e3e0b6..9ff790174906 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/dmi.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
1524 chained_irq_exit(chip, desc); 1525 chained_irq_exit(chip, desc);
1525} 1526}
1526 1527
1528/*
1529 * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
1530 * tables. Since we leave GPIOs that are not capable of generating
1531 * interrupts out of the irqdomain the numbering will be different and
1532 * cause devices using the hardcoded IRQ numbers fail. In order not to
1533 * break such machines we will only mask pins from irqdomain if the machine
1534 * is not listed below.
1535 */
1536static const struct dmi_system_id chv_no_valid_mask[] = {
1537 {
1538 /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
1539 .ident = "Acer Chromebook (CYAN)",
1540 .matches = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
1543 DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
1544 },
1545 }
1546};
1547
1527static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) 1548static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1528{ 1549{
1529 const struct chv_gpio_pinrange *range; 1550 const struct chv_gpio_pinrange *range;
1530 struct gpio_chip *chip = &pctrl->chip; 1551 struct gpio_chip *chip = &pctrl->chip;
1552 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1531 int ret, i, offset; 1553 int ret, i, offset;
1532 1554
1533 *chip = chv_gpio_chip; 1555 *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1536 chip->label = dev_name(pctrl->dev); 1558 chip->label = dev_name(pctrl->dev);
1537 chip->parent = pctrl->dev; 1559 chip->parent = pctrl->dev;
1538 chip->base = -1; 1560 chip->base = -1;
1539 chip->irq_need_valid_mask = true; 1561 chip->irq_need_valid_mask = need_valid_mask;
1540 1562
1541 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); 1563 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
1542 if (ret) { 1564 if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1567 intsel &= CHV_PADCTRL0_INTSEL_MASK; 1589 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1568 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1590 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1569 1591
1570 if (intsel >= pctrl->community->nirqs) 1592 if (need_valid_mask && intsel >= pctrl->community->nirqs)
1571 clear_bit(i, chip->irq_valid_mask); 1593 clear_bit(i, chip->irq_valid_mask);
1572 } 1594 }
1573 1595
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7671424d46cb..31a3a98d067c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
667}; 667};
668 668
669static const char * const i2c_ao_groups[] = { 669static const char * const i2c_ao_groups[] = {
670 "i2c_sdk_ao", "i2c_sda_ao", 670 "i2c_sck_ao", "i2c_sda_ao",
671}; 671};
672 672
673static const char * const i2c_slave_ao_groups[] = { 673static const char * const i2c_slave_ao_groups[] = {
674 "i2c_slave_sdk_ao", "i2c_slave_sda_ao", 674 "i2c_slave_sck_ao", "i2c_slave_sda_ao",
675}; 675};
676 676
677static const char * const remote_input_ao_groups[] = { 677static const char * const remote_input_ao_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8b2d45e85bae..9c267dcda094 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n",
1782 pcs->desc.npins, pcs->base, pcs->size); 1782 pcs->desc.npins, pcs->base, pcs->size);
1783 1783
1784 return 0; 1784 return pinctrl_enable(pcs->pctl);
1785 1785
1786free: 1786free:
1787 pcs_free_resources(pcs); 1787 pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 676efcc032d2..3ae8066bc127 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); 1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
1286} 1286}
1287 1287
1288static int st_gpio_irq_request_resources(struct irq_data *d)
1289{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1291
1292 st_gpio_direction_input(gc, d->hwirq);
1293
1294 return gpiochip_lock_as_irq(gc, d->hwirq);
1295}
1296
1297static void st_gpio_irq_release_resources(struct irq_data *d)
1298{
1299 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1300
1301 gpiochip_unlock_as_irq(gc, d->hwirq);
1302}
1303
1288static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) 1304static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
1289{ 1305{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1306 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
1438}; 1454};
1439 1455
1440static struct irq_chip st_gpio_irqchip = { 1456static struct irq_chip st_gpio_irqchip = {
1441 .name = "GPIO", 1457 .name = "GPIO",
1442 .irq_disable = st_gpio_irq_mask, 1458 .irq_request_resources = st_gpio_irq_request_resources,
1443 .irq_mask = st_gpio_irq_mask, 1459 .irq_release_resources = st_gpio_irq_release_resources,
1444 .irq_unmask = st_gpio_irq_unmask, 1460 .irq_disable = st_gpio_irq_mask,
1445 .irq_set_type = st_gpio_irq_set_type, 1461 .irq_mask = st_gpio_irq_mask,
1446 .flags = IRQCHIP_SKIP_SET_WAKE, 1462 .irq_unmask = st_gpio_irq_unmask,
1463 .irq_set_type = st_gpio_irq_set_type,
1464 .flags = IRQCHIP_SKIP_SET_WAKE,
1447}; 1465};
1448 1466
1449static int st_gpiolib_register_bank(struct st_pinctrl *info, 1467static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae424cee2..743d1f458205 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408 PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
409 PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
410 PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
411 PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
412 PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
413 PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
414 PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
415 PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
416 PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
417 PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
418 PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
419 PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
420 PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
421 PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
422 PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
423 PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
424 PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
425 PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
426 PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
427 PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
428 PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
429 PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
430 PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
431 PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
432 PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
433 PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
434 PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
435 PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
436 PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
437 PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408}; 438};
409 439
410static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { 440static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c978be5eb9eb..273badd92561 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
609 609
610 raw_spin_lock_irqsave(&pctrl->lock, flags); 610 raw_spin_lock_irqsave(&pctrl->lock, flags);
611 611
612 val = readl(pctrl->regs + g->intr_status_reg);
613 val &= ~BIT(g->intr_status_bit);
614 writel(val, pctrl->regs + g->intr_status_reg);
615
616 val = readl(pctrl->regs + g->intr_cfg_reg); 612 val = readl(pctrl->regs + g->intr_cfg_reg);
617 val |= BIT(g->intr_enable_bit); 613 val |= BIT(g->intr_enable_bit);
618 writel(val, pctrl->regs + g->intr_cfg_reg); 614 writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f9b49967f512..63e51b56a22a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1468 1468
1469/* pin banks of exynos5433 pin-controller - ALIVE */ 1469/* pin banks of exynos5433 pin-controller - ALIVE */
1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { 1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
1471 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1471 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1472 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1472 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1473 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1473 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1474 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1474 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1475 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1475 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1476 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1476 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1477 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1477 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1478 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1478 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1479 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1479 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1480}; 1480};
1481 1481
1482/* pin banks of exynos5433 pin-controller - AUD */ 1482/* pin banks of exynos5433 pin-controller - AUD */
1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { 1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
1484 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1484 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1485 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1485 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1486}; 1486};
1487 1487
1488/* pin banks of exynos5433 pin-controller - CPIF */ 1488/* pin banks of exynos5433 pin-controller - CPIF */
1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = { 1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
1490 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1490 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1491}; 1491};
1492 1492
1493/* pin banks of exynos5433 pin-controller - eSE */ 1493/* pin banks of exynos5433 pin-controller - eSE */
1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { 1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
1495 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1495 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1496}; 1496};
1497 1497
1498/* pin banks of exynos5433 pin-controller - FINGER */ 1498/* pin banks of exynos5433 pin-controller - FINGER */
1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { 1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
1500 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1500 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1501}; 1501};
1502 1502
1503/* pin banks of exynos5433 pin-controller - FSYS */ 1503/* pin banks of exynos5433 pin-controller - FSYS */
1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { 1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
1505 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1505 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1506 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1506 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1507 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1507 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1508 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1508 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1509 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1509 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1510 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1510 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1511}; 1511};
1512 1512
1513/* pin banks of exynos5433 pin-controller - IMEM */ 1513/* pin banks of exynos5433 pin-controller - IMEM */
1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { 1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
1515 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1515 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1516}; 1516};
1517 1517
1518/* pin banks of exynos5433 pin-controller - NFC */ 1518/* pin banks of exynos5433 pin-controller - NFC */
1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { 1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
1520 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1520 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1521}; 1521};
1522 1522
1523/* pin banks of exynos5433 pin-controller - PERIC */ 1523/* pin banks of exynos5433 pin-controller - PERIC */
1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { 1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
1525 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1525 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1526 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1526 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1527 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1527 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1528 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1528 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1529 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1529 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1530 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1530 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1531 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1531 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1532 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1532 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1533 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1533 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1534 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1534 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1535 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1535 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1536 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1536 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1537 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1537 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1538 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1538 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1539 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1539 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1540 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1540 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1541 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1541 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1542}; 1542};
1543 1543
1544/* pin banks of exynos5433 pin-controller - TOUCH */ 1544/* pin banks of exynos5433 pin-controller - TOUCH */
1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { 1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
1546 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1546 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1547}; 1547};
1548 1548
1549/* 1549/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index a473092fb8d2..cd046eb7d705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,17 +79,6 @@
79 .name = id \ 79 .name = id \
80 } 80 }
81 81
82#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
83 { \
84 .type = &bank_type_alive, \
85 .pctl_offset = reg, \
86 .nr_pins = pins, \
87 .eint_type = EINT_TYPE_WKUP, \
88 .eint_offset = offs, \
89 .name = id, \
90 .pctl_res_idx = pctl_idx, \
91 } \
92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ 82#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \ 83 { \
95 .type = &exynos5433_bank_type_off, \ 84 .type = &exynos5433_bank_type_off, \
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f9ddba7decc1..d7aa22cff480 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
988 988
989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { 989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
990 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 990 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
991 virt_base[i] = devm_ioremap_resource(&pdev->dev, res); 991 if (!res) {
992 if (IS_ERR(virt_base[i])) 992 dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
993 return ERR_CAST(virt_base[i]); 993 return ERR_PTR(-EINVAL);
994 }
995 virt_base[i] = devm_ioremap(&pdev->dev, res->start,
996 resource_size(res));
997 if (!virt_base[i]) {
998 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
999 return ERR_PTR(-EIO);
1000 }
994 } 1001 }
995 1002
996 bank = d->pin_banks; 1003 bank = d->pin_banks;
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 08150a321be6..a70157f0acf4 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
816 pmx->pctl_desc.pins = pmx->pins; 816 pmx->pctl_desc.pins = pmx->pins;
817 pmx->pctl_desc.npins = pfc->info->nr_pins; 817 pmx->pctl_desc.npins = pfc->info->nr_pins;
818 818
819 return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, 819 ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
820 &pmx->pctl); 820 &pmx->pctl);
821 if (ret) {
822 dev_err(pfc->dev, "could not register: %i\n", ret);
823
824 return ret;
825 }
826
827 return pinctrl_enable(pmx->pctl);
821} 828}
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
index 815a88673d38..542077069391 100644
--- a/drivers/pinctrl/ti/Kconfig
+++ b/drivers/pinctrl/ti/Kconfig
@@ -1,6 +1,6 @@
1config PINCTRL_TI_IODELAY 1config PINCTRL_TI_IODELAY
2 tristate "TI IODelay Module pinconf driver" 2 tristate "TI IODelay Module pinconf driver"
3 depends on OF 3 depends on OF && (SOC_DRA7XX || COMPILE_TEST)
4 select GENERIC_PINCTRL_GROUPS 4 select GENERIC_PINCTRL_GROUPS
5 select GENERIC_PINMUX_FUNCTIONS 5 select GENERIC_PINMUX_FUNCTIONS
6 select GENERIC_PINCONF 6 select GENERIC_PINCONF
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 717e3404900c..362c50918c13 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
893 893
894 platform_set_drvdata(pdev, iod); 894 platform_set_drvdata(pdev, iod);
895 895
896 return pinctrl_enable(iod->pctl);
897
896exit_out: 898exit_out:
897 of_node_put(np); 899 of_node_put(np);
898 return ret; 900 return ret;
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 09b4df74291e..bb865695d7a6 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
193 193
194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); 194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
195 195
196 if (IS_ERR(kvm_ptp_clock.ptp_clock)) 196 return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
197 return PTR_ERR(kvm_ptp_clock.ptp_clock);
198
199 return 0;
200} 197}
201 198
202module_init(ptp_kvm_init); 199module_init(ptp_kvm_init);
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 053088b9b66e..c1527cb645be 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
36 .clk_rate = 19200000, 36 .clk_rate = 19200000,
37 .npwm = 4, 37 .npwm = 4,
38 .base_unit_bits = 22, 38 .base_unit_bits = 22,
39 .bypass = true,
40};
41
42/* Tangier */
43static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
44 .clk_rate = 19200000,
45 .npwm = 4,
46 .base_unit_bits = 22,
39}; 47};
40 48
41static int pwm_lpss_probe_pci(struct pci_dev *pdev, 49static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
97 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, 105 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
98 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, 106 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
99 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, 107 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
100 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, 108 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
101 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, 109 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
102 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, 110 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
103 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, 111 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index b22b6fdadb9a..5d6ed1507d29 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
37 .clk_rate = 19200000, 37 .clk_rate = 19200000,
38 .npwm = 4, 38 .npwm = 4,
39 .base_unit_bits = 22, 39 .base_unit_bits = 22,
40 .bypass = true,
40}; 41};
41 42
42static int pwm_lpss_probe_platform(struct platform_device *pdev) 43static int pwm_lpss_probe_platform(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 689d2c1cbead..8db0d40ccacd 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); 57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
58} 58}
59 59
60static int pwm_lpss_update(struct pwm_device *pwm) 60static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
61{ 61{
62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); 62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; 63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
65 u32 val; 65 u32 val;
66 int err; 66 int err;
67 67
68 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
69
70 /* 68 /*
71 * PWM Configuration register has SW_UPDATE bit that is set when a new 69 * PWM Configuration register has SW_UPDATE bit that is set when a new
72 * configuration is written to the register. The bit is automatically 70 * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
122 pwm_lpss_write(pwm, ctrl); 120 pwm_lpss_write(pwm, ctrl);
123} 121}
124 122
123static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
124{
125 if (cond)
126 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
127}
128
125static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, 129static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
126 struct pwm_state *state) 130 struct pwm_state *state)
127{ 131{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
137 return ret; 141 return ret;
138 } 142 }
139 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 143 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
140 ret = pwm_lpss_update(pwm); 144 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
145 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
146 ret = pwm_lpss_wait_for_update(pwm);
141 if (ret) { 147 if (ret) {
142 pm_runtime_put(chip->dev); 148 pm_runtime_put(chip->dev);
143 return ret; 149 return ret;
144 } 150 }
145 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); 151 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
146 } else { 152 } else {
147 ret = pwm_lpss_is_updating(pwm); 153 ret = pwm_lpss_is_updating(pwm);
148 if (ret) 154 if (ret)
149 return ret; 155 return ret;
150 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 156 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
151 return pwm_lpss_update(pwm); 157 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
158 return pwm_lpss_wait_for_update(pwm);
152 } 159 }
153 } else if (pwm_is_enabled(pwm)) { 160 } else if (pwm_is_enabled(pwm)) {
154 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); 161 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index c94cd7c2695d..98306bb02cfe 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
22 unsigned long clk_rate; 22 unsigned long clk_rate;
23 unsigned int npwm; 23 unsigned int npwm;
24 unsigned long base_unit_bits; 24 unsigned long base_unit_bits;
25 bool bypass;
25}; 26};
26 27
27struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, 28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1f7336..744d56197286 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
191 return 0; 191 return 0;
192} 192}
193 193
194static int rockchip_pwm_enable(struct pwm_chip *chip,
195 struct pwm_device *pwm,
196 bool enable,
197 enum pwm_polarity polarity)
198{
199 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
200 int ret;
201
202 if (enable) {
203 ret = clk_enable(pc->clk);
204 if (ret)
205 return ret;
206 }
207
208 pc->data->set_enable(chip, pwm, enable, polarity);
209
210 if (!enable)
211 clk_disable(pc->clk);
212
213 return 0;
214}
215
194static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 216static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
195 struct pwm_state *state) 217 struct pwm_state *state)
196{ 218{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
207 return ret; 229 return ret;
208 230
209 if (state->polarity != curstate.polarity && enabled) { 231 if (state->polarity != curstate.polarity && enabled) {
210 pc->data->set_enable(chip, pwm, false, state->polarity); 232 ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
233 if (ret)
234 goto out;
211 enabled = false; 235 enabled = false;
212 } 236 }
213 237
214 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); 238 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
215 if (ret) { 239 if (ret) {
216 if (enabled != curstate.enabled) 240 if (enabled != curstate.enabled)
217 pc->data->set_enable(chip, pwm, !enabled, 241 rockchip_pwm_enable(chip, pwm, !enabled,
218 state->polarity); 242 state->polarity);
219
220 goto out; 243 goto out;
221 } 244 }
222 245
223 if (state->enabled != enabled) 246 if (state->enabled != enabled) {
224 pc->data->set_enable(chip, pwm, state->enabled, 247 ret = rockchip_pwm_enable(chip, pwm, state->enabled,
225 state->polarity); 248 state->polarity);
249 if (ret)
250 goto out;
251 }
226 252
227 /* 253 /*
228 * Update the state with the real hardware, which can differ a bit 254 * Update the state with the real hardware, which can differ a bit
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9d19b9a62011..315a4be8dc1e 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,8 +37,8 @@
37#include "tsi721.h" 37#include "tsi721.h"
38 38
39#ifdef DEBUG 39#ifdef DEBUG
40u32 dbg_level; 40u32 tsi_dbg_level;
41module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 41module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
43#endif 43#endif
44 44
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5941437cbdd1..957eadc58150 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -40,11 +40,11 @@ enum {
40}; 40};
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43extern u32 dbg_level; 43extern u32 tsi_dbg_level;
44 44
45#define tsi_debug(level, dev, fmt, arg...) \ 45#define tsi_debug(level, dev, fmt, arg...) \
46 do { \ 46 do { \
47 if (DBG_##level & dbg_level) \ 47 if (DBG_##level & tsi_dbg_level) \
48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ 48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
49 } while (0) 49 } while (0)
50#else 50#else
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 65f86bc24c07..1dc43fc5f65f 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -76,7 +76,7 @@ config QCOM_ADSP_PIL
76 depends on OF && ARCH_QCOM 76 depends on OF && ARCH_QCOM
77 depends on REMOTEPROC 77 depends on REMOTEPROC
78 depends on QCOM_SMEM 78 depends on QCOM_SMEM
79 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 79 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
80 select MFD_SYSCON 80 select MFD_SYSCON
81 select QCOM_MDT_LOADER 81 select QCOM_MDT_LOADER
82 select QCOM_RPROC_COMMON 82 select QCOM_RPROC_COMMON
@@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL
93 depends on OF && ARCH_QCOM 93 depends on OF && ARCH_QCOM
94 depends on QCOM_SMEM 94 depends on QCOM_SMEM
95 depends on REMOTEPROC 95 depends on REMOTEPROC
96 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 96 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
97 select MFD_SYSCON 97 select MFD_SYSCON
98 select QCOM_RPROC_COMMON 98 select QCOM_RPROC_COMMON
99 select QCOM_SCM 99 select QCOM_SCM
@@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL
104config QCOM_WCNSS_PIL 104config QCOM_WCNSS_PIL
105 tristate "Qualcomm WCNSS Peripheral Image Loader" 105 tristate "Qualcomm WCNSS Peripheral Image Loader"
106 depends on OF && ARCH_QCOM 106 depends on OF && ARCH_QCOM
107 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 107 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
108 depends on QCOM_SMEM 108 depends on QCOM_SMEM
109 depends on REMOTEPROC 109 depends on REMOTEPROC
110 select QCOM_MDT_LOADER 110 select QCOM_MDT_LOADER
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f1e5e65388bb..cd739d2fa160 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
275} 275}
276EXPORT_SYMBOL_GPL(reset_control_status); 276EXPORT_SYMBOL_GPL(reset_control_status);
277 277
278static struct reset_control *__reset_control_get( 278static struct reset_control *__reset_control_get_internal(
279 struct reset_controller_dev *rcdev, 279 struct reset_controller_dev *rcdev,
280 unsigned int index, bool shared) 280 unsigned int index, bool shared)
281{ 281{
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
308 return rstc; 308 return rstc;
309} 309}
310 310
311static void __reset_control_put(struct reset_control *rstc) 311static void __reset_control_put_internal(struct reset_control *rstc)
312{ 312{
313 lockdep_assert_held(&reset_list_mutex); 313 lockdep_assert_held(&reset_list_mutex);
314 314
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
377 } 377 }
378 378
379 /* reset_list_mutex also protects the rcdev's reset_control list */ 379 /* reset_list_mutex also protects the rcdev's reset_control list */
380 rstc = __reset_control_get(rcdev, rstc_id, shared); 380 rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
381 381
382 mutex_unlock(&reset_list_mutex); 382 mutex_unlock(&reset_list_mutex);
383 383
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
385} 385}
386EXPORT_SYMBOL_GPL(__of_reset_control_get); 386EXPORT_SYMBOL_GPL(__of_reset_control_get);
387 387
388struct reset_control *__reset_control_get(struct device *dev, const char *id,
389 int index, bool shared, bool optional)
390{
391 if (dev->of_node)
392 return __of_reset_control_get(dev->of_node, id, index, shared,
393 optional);
394
395 return optional ? NULL : ERR_PTR(-EINVAL);
396}
397EXPORT_SYMBOL_GPL(__reset_control_get);
398
388/** 399/**
389 * reset_control_put - free the reset controller 400 * reset_control_put - free the reset controller
390 * @rstc: reset controller 401 * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
396 return; 407 return;
397 408
398 mutex_lock(&reset_list_mutex); 409 mutex_lock(&reset_list_mutex);
399 __reset_control_put(rstc); 410 __reset_control_put_internal(rstc);
400 mutex_unlock(&reset_list_mutex); 411 mutex_unlock(&reset_list_mutex);
401} 412}
402EXPORT_SYMBOL_GPL(reset_control_put); 413EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
417 if (!ptr) 428 if (!ptr)
418 return ERR_PTR(-ENOMEM); 429 return ERR_PTR(-ENOMEM);
419 430
420 rstc = __of_reset_control_get(dev ? dev->of_node : NULL, 431 rstc = __reset_control_get(dev, id, index, shared, optional);
421 id, index, shared, optional);
422 if (!IS_ERR(rstc)) { 432 if (!IS_ERR(rstc)) {
423 *ptr = rstc; 433 *ptr = rstc;
424 devres_add(dev, ptr); 434 devres_add(dev, ptr);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 40f1136f5568..058db724b5a2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
572 rc = -EIO; 572 rc = -EIO;
573 goto out; 573 goto out;
574 } 574 }
575 if (prepcblk->ccp_rscode != 0) {
576 DEBUG_WARN(
577 "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
578 (int) prepcblk->ccp_rtcode,
579 (int) prepcblk->ccp_rscode);
580 }
575 581
576 /* process response cprb param block */ 582 /* process response cprb param block */
577 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); 583 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
761} 767}
762 768
763/* 769/*
764 * Fetch just the mkvp value via query_crypto_facility from adapter. 770 * Fetch the current and old mkvp values via
771 * query_crypto_facility from adapter.
765 */ 772 */
766static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) 773static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
767{ 774{
768 int rc, found = 0; 775 int rc, found = 0;
769 size_t rlen, vlen; 776 size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
779 rc = query_crypto_facility(cardnr, domain, "STATICSA", 786 rc = query_crypto_facility(cardnr, domain, "STATICSA",
780 rarray, &rlen, varray, &vlen); 787 rarray, &rlen, varray, &vlen);
781 if (rc == 0 && rlen > 8*8 && vlen > 184+8) { 788 if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
782 if (rarray[64] == '2') { 789 if (rarray[8*8] == '2') {
783 /* current master key state is valid */ 790 /* current master key state is valid */
784 *mkvp = *((u64 *)(varray + 184)); 791 mkvp[0] = *((u64 *)(varray + 184));
792 mkvp[1] = *((u64 *)(varray + 172));
785 found = 1; 793 found = 1;
786 } 794 }
787 } 795 }
@@ -796,14 +804,14 @@ struct mkvp_info {
796 struct list_head list; 804 struct list_head list;
797 u16 cardnr; 805 u16 cardnr;
798 u16 domain; 806 u16 domain;
799 u64 mkvp; 807 u64 mkvp[2];
800}; 808};
801 809
802/* a list with mkvp_info entries */ 810/* a list with mkvp_info entries */
803static LIST_HEAD(mkvp_list); 811static LIST_HEAD(mkvp_list);
804static DEFINE_SPINLOCK(mkvp_list_lock); 812static DEFINE_SPINLOCK(mkvp_list_lock);
805 813
806static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) 814static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
807{ 815{
808 int rc = -ENOENT; 816 int rc = -ENOENT;
809 struct mkvp_info *ptr; 817 struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
812 list_for_each_entry(ptr, &mkvp_list, list) { 820 list_for_each_entry(ptr, &mkvp_list, list) {
813 if (ptr->cardnr == cardnr && 821 if (ptr->cardnr == cardnr &&
814 ptr->domain == domain) { 822 ptr->domain == domain) {
815 *mkvp = ptr->mkvp; 823 memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
816 rc = 0; 824 rc = 0;
817 break; 825 break;
818 } 826 }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
822 return rc; 830 return rc;
823} 831}
824 832
825static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) 833static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
826{ 834{
827 int found = 0; 835 int found = 0;
828 struct mkvp_info *ptr; 836 struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
831 list_for_each_entry(ptr, &mkvp_list, list) { 839 list_for_each_entry(ptr, &mkvp_list, list) {
832 if (ptr->cardnr == cardnr && 840 if (ptr->cardnr == cardnr &&
833 ptr->domain == domain) { 841 ptr->domain == domain) {
834 ptr->mkvp = mkvp; 842 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
835 found = 1; 843 found = 1;
836 break; 844 break;
837 } 845 }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
844 } 852 }
845 ptr->cardnr = cardnr; 853 ptr->cardnr = cardnr;
846 ptr->domain = domain; 854 ptr->domain = domain;
847 ptr->mkvp = mkvp; 855 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
848 list_add(&ptr->list, &mkvp_list); 856 list_add(&ptr->list, &mkvp_list);
849 } 857 }
850 spin_unlock_bh(&mkvp_list_lock); 858 spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
888 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 896 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
889 struct zcrypt_device_matrix *device_matrix; 897 struct zcrypt_device_matrix *device_matrix;
890 u16 card, dom; 898 u16 card, dom;
891 u64 mkvp; 899 u64 mkvp[2];
892 int i, rc; 900 int i, rc, oi = -1;
893 901
894 /* mkvp must not be zero */ 902 /* mkvp must not be zero */
895 if (t->mkvp == 0) 903 if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
910 device_matrix->device[i].functions & 0x04) { 918 device_matrix->device[i].functions & 0x04) {
911 /* an enabled CCA Coprocessor card */ 919 /* an enabled CCA Coprocessor card */
912 /* try cached mkvp */ 920 /* try cached mkvp */
913 if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && 921 if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
914 t->mkvp == mkvp) { 922 t->mkvp == mkvp[0]) {
915 if (!verify) 923 if (!verify)
916 break; 924 break;
917 /* verify: fetch mkvp from adapter */ 925 /* verify: fetch mkvp from adapter */
918 if (fetch_mkvp(card, dom, &mkvp) == 0) { 926 if (fetch_mkvp(card, dom, mkvp) == 0) {
919 mkvp_cache_update(card, dom, mkvp); 927 mkvp_cache_update(card, dom, mkvp);
920 if (t->mkvp == mkvp) 928 if (t->mkvp == mkvp[0])
921 break; 929 break;
922 } 930 }
923 } 931 }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
936 card = AP_QID_CARD(device_matrix->device[i].qid); 944 card = AP_QID_CARD(device_matrix->device[i].qid);
937 dom = AP_QID_QUEUE(device_matrix->device[i].qid); 945 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
938 /* fresh fetch mkvp from adapter */ 946 /* fresh fetch mkvp from adapter */
939 if (fetch_mkvp(card, dom, &mkvp) == 0) { 947 if (fetch_mkvp(card, dom, mkvp) == 0) {
940 mkvp_cache_update(card, dom, mkvp); 948 mkvp_cache_update(card, dom, mkvp);
941 if (t->mkvp == mkvp) 949 if (t->mkvp == mkvp[0])
942 break; 950 break;
951 if (t->mkvp == mkvp[1] && oi < 0)
952 oi = i;
943 } 953 }
944 } 954 }
955 if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
956 /* old mkvp matched, use this card then */
957 card = AP_QID_CARD(device_matrix->device[oi].qid);
958 dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
959 }
945 } 960 }
946 if (i < MAX_ZDEV_ENTRIES) { 961 if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
947 if (pcardnr) 962 if (pcardnr)
948 *pcardnr = card; 963 *pcardnr = card;
949 if (pdomain) 964 if (pdomain)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7addea8741b..d9561e39c3b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); 961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
962int qeth_bridgeport_an_set(struct qeth_card *card, int enable); 962int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
964int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); 964int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
965 int extra_elems, int data_offset);
965int qeth_get_elements_for_frags(struct sk_buff *); 966int qeth_get_elements_for_frags(struct sk_buff *);
966int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 967int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
967 struct sk_buff *, struct qeth_hdr *, int, int, int); 968 struct sk_buff *, struct qeth_hdr *, int, int, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 315d8a2db7c0..9a5f99ccb122 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3837 * @card: qeth card structure, to check max. elems. 3837 * @card: qeth card structure, to check max. elems.
3838 * @skb: SKB address 3838 * @skb: SKB address
3839 * @extra_elems: extra elems needed, to check against max. 3839 * @extra_elems: extra elems needed, to check against max.
3840 * @data_offset: range starts at skb->data + data_offset
3840 * 3841 *
3841 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3842 * skb data, including linear part and fragments. Checks if the result plus 3843 * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3844 * Note: extra_elems is not included in the returned result. 3845 * Note: extra_elems is not included in the returned result.
3845 */ 3846 */
3846int qeth_get_elements_no(struct qeth_card *card, 3847int qeth_get_elements_no(struct qeth_card *card,
3847 struct sk_buff *skb, int extra_elems) 3848 struct sk_buff *skb, int extra_elems, int data_offset)
3848{ 3849{
3849 int elements = qeth_get_elements_for_range( 3850 int elements = qeth_get_elements_for_range(
3850 (addr_t)skb->data, 3851 (addr_t)skb->data + data_offset,
3851 (addr_t)skb->data + skb_headlen(skb)) + 3852 (addr_t)skb->data + skb_headlen(skb)) +
3852 qeth_get_elements_for_frags(skb); 3853 qeth_get_elements_for_frags(skb);
3853 3854
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bea483307618..af4e6a639fec 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
849 * chaining we can not send long frag lists 849 * chaining we can not send long frag lists
850 */ 850 */
851 if ((card->info.type != QETH_CARD_TYPE_IQD) && 851 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
852 !qeth_get_elements_no(card, new_skb, 0)) { 852 !qeth_get_elements_no(card, new_skb, 0, 0)) {
853 int lin_rc = skb_linearize(new_skb); 853 int lin_rc = skb_linearize(new_skb);
854 854
855 if (card->options.performance_stats) { 855 if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
894 } 894 }
895 } 895 }
896 896
897 elements = qeth_get_elements_no(card, new_skb, elements_needed); 897 elements = qeth_get_elements_no(card, new_skb, elements_needed,
898 (data_offset > 0) ? data_offset : 0);
898 if (!elements) { 899 if (!elements) {
899 if (data_offset >= 0) 900 if (data_offset >= 0)
900 kmem_cache_free(qeth_core_header_cache, hdr); 901 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06d0addcc058..653f0fb76573 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
2609 char daddr[16]; 2609 char daddr[16];
2610 struct af_iucv_trans_hdr *iucv_hdr; 2610 struct af_iucv_trans_hdr *iucv_hdr;
2611 2611
2612 skb_pull(skb, 14);
2613 card->dev->header_ops->create(skb, card->dev, 0,
2614 card->dev->dev_addr, card->dev->dev_addr,
2615 card->dev->addr_len);
2616 skb_pull(skb, 14);
2617 iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
2618 memset(hdr, 0, sizeof(struct qeth_hdr)); 2612 memset(hdr, 0, sizeof(struct qeth_hdr));
2619 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2613 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2620 hdr->hdr.l3.ext_flags = 0; 2614 hdr->hdr.l3.ext_flags = 0;
2621 hdr->hdr.l3.length = skb->len; 2615 hdr->hdr.l3.length = skb->len - ETH_HLEN;
2622 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 2616 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
2617
2618 iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
2623 memset(daddr, 0, sizeof(daddr)); 2619 memset(daddr, 0, sizeof(daddr));
2624 daddr[0] = 0xfe; 2620 daddr[0] = 0xfe;
2625 daddr[1] = 0x80; 2621 daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2823 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2819 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2824 !skb_is_nonlinear(skb)) { 2820 !skb_is_nonlinear(skb)) {
2825 new_skb = skb; 2821 new_skb = skb;
2826 if (new_skb->protocol == ETH_P_AF_IUCV) 2822 data_offset = ETH_HLEN;
2827 data_offset = 0;
2828 else
2829 data_offset = ETH_HLEN;
2830 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2823 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2831 if (!hdr) 2824 if (!hdr)
2832 goto tx_drop; 2825 goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867 */ 2860 */
2868 if ((card->info.type != QETH_CARD_TYPE_IQD) && 2861 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
2869 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || 2862 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2870 (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { 2863 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
2871 int lin_rc = skb_linearize(new_skb); 2864 int lin_rc = skb_linearize(new_skb);
2872 2865
2873 if (card->options.performance_stats) { 2866 if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2909 2902
2910 elements = use_tso ? 2903 elements = use_tso ?
2911 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : 2904 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2912 qeth_get_elements_no(card, new_skb, hdr_elements); 2905 qeth_get_elements_no(card, new_skb, hdr_elements,
2906 (data_offset > 0) ? data_offset : 0);
2913 if (!elements) { 2907 if (!elements) {
2914 if (data_offset >= 0) 2908 if (data_offset >= 0)
2915 kmem_cache_free(qeth_core_header_cache, hdr); 2909 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4bf55b5d78be..3c52867dfe28 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1253,20 +1253,6 @@ config SCSI_LPFC_DEBUG_FS
1253 This makes debugging information from the lpfc driver 1253 This makes debugging information from the lpfc driver
1254 available via the debugfs filesystem. 1254 available via the debugfs filesystem.
1255 1255
1256config LPFC_NVME_INITIATOR
1257 bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
1258 depends on SCSI_LPFC && NVME_FC
1259 ---help---
1260 This enables NVME Initiator support in the Emulex lpfc driver.
1261
1262config LPFC_NVME_TARGET
1263 bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
1264 depends on SCSI_LPFC && NVME_TARGET_FC
1265 ---help---
1266 This enables NVME Target support in the Emulex lpfc driver.
1267 Target enablement must still be enabled on a per adapter
1268 basis by module parameters.
1269
1270config SCSI_SIM710 1256config SCSI_SIM710
1271 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" 1257 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
1272 depends on (EISA || MCA) && SCSI 1258 depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
1692 1692
1693#define aac_adapter_check_health(dev) \
1694 (dev)->a_ops.adapter_check_health(dev)
1695
1696#define aac_adapter_restart(dev, bled, reset_type) \ 1693#define aac_adapter_restart(dev, bled, reset_type) \
1697 ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) 1694 ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
1698 1695
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2615 return capacity; 2612 return capacity;
2616} 2613}
2617 2614
2615static inline int aac_adapter_check_health(struct aac_dev *dev)
2616{
2617 if (unlikely(pci_channel_offline(dev->pdev)))
2618 return -1;
2619
2620 return (dev)->a_ops.adapter_check_health(dev);
2621}
2622
2618/* SCp.phase values */ 2623/* SCp.phase values */
2619#define AAC_OWNER_MIDLEVEL 0x101 2624#define AAC_OWNER_MIDLEVEL 0x101
2620#define AAC_OWNER_LOWLEVEL 0x102 2625#define AAC_OWNER_LOWLEVEL 0x102
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a3ad04293487..1f4918355fdb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
1873 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1873 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1874 1874
1875 if (BlinkLED < 0) { 1875 if (BlinkLED < 0) {
1876 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1876 printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1877 aac->name, BlinkLED);
1877 goto out; 1878 goto out;
1878 } 1879 }
1879 1880
@@ -2056,7 +2057,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2056{ 2057{
2057 struct hw_fib **hw_fib_p; 2058 struct hw_fib **hw_fib_p;
2058 struct fib **fib_p; 2059 struct fib **fib_p;
2059 int rcode = 1;
2060 2060
2061 hw_fib_p = hw_fib_pool; 2061 hw_fib_p = hw_fib_pool;
2062 fib_p = fib_pool; 2062 fib_p = fib_pool;
@@ -2074,11 +2074,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2074 } 2074 }
2075 } 2075 }
2076 2076
2077 /*
2078 * Get the actual number of allocated fibs
2079 */
2077 num = hw_fib_p - hw_fib_pool; 2080 num = hw_fib_p - hw_fib_pool;
2078 if (!num) 2081 return num;
2079 rcode = 0;
2080
2081 return rcode;
2082} 2082}
2083 2083
2084static void wakeup_fibctx_threads(struct aac_dev *dev, 2084static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2186,6 @@ static void aac_process_events(struct aac_dev *dev)
2186 struct fib *fib; 2186 struct fib *fib;
2187 unsigned long flags; 2187 unsigned long flags;
2188 spinlock_t *t_lock; 2188 spinlock_t *t_lock;
2189 unsigned int rcode;
2190 2189
2191 t_lock = dev->queues->queue[HostNormCmdQueue].lock; 2190 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2192 spin_lock_irqsave(t_lock, flags); 2191 spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2268,8 @@ static void aac_process_events(struct aac_dev *dev)
2269 * Fill up fib pointer pools with actual fibs 2268 * Fill up fib pointer pools with actual fibs
2270 * and hw_fibs 2269 * and hw_fibs
2271 */ 2270 */
2272 rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); 2271 num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2273 if (!rcode) 2272 if (!num)
2274 goto free_mem; 2273 goto free_mem;
2275 2274
2276 /* 2275 /*
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 48e200102221..c01b47e5b55a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@ struct alua_queue_data {
113#define ALUA_POLICY_SWITCH_ALL 1 113#define ALUA_POLICY_SWITCH_ALL 1
114 114
115static void alua_rtpg_work(struct work_struct *work); 115static void alua_rtpg_work(struct work_struct *work);
116static void alua_rtpg_queue(struct alua_port_group *pg, 116static bool alua_rtpg_queue(struct alua_port_group *pg,
117 struct scsi_device *sdev, 117 struct scsi_device *sdev,
118 struct alua_queue_data *qdata, bool force); 118 struct alua_queue_data *qdata, bool force);
119static void alua_check(struct scsi_device *sdev, bool force); 119static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
862 kref_put(&pg->kref, release_port_group); 862 kref_put(&pg->kref, release_port_group);
863} 863}
864 864
865static void alua_rtpg_queue(struct alua_port_group *pg, 865/**
866 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
867 *
868 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
869 * That function is responsible for calling @qdata->fn().
870 */
871static bool alua_rtpg_queue(struct alua_port_group *pg,
866 struct scsi_device *sdev, 872 struct scsi_device *sdev,
867 struct alua_queue_data *qdata, bool force) 873 struct alua_queue_data *qdata, bool force)
868{ 874{
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
870 unsigned long flags; 876 unsigned long flags;
871 struct workqueue_struct *alua_wq = kaluad_wq; 877 struct workqueue_struct *alua_wq = kaluad_wq;
872 878
873 if (!pg) 879 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
874 return; 880 return false;
875 881
876 spin_lock_irqsave(&pg->lock, flags); 882 spin_lock_irqsave(&pg->lock, flags);
877 if (qdata) { 883 if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
884 pg->flags |= ALUA_PG_RUN_RTPG; 890 pg->flags |= ALUA_PG_RUN_RTPG;
885 kref_get(&pg->kref); 891 kref_get(&pg->kref);
886 pg->rtpg_sdev = sdev; 892 pg->rtpg_sdev = sdev;
887 scsi_device_get(sdev);
888 start_queue = 1; 893 start_queue = 1;
889 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { 894 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
890 pg->flags |= ALUA_PG_RUN_RTPG; 895 pg->flags |= ALUA_PG_RUN_RTPG;
891 /* Do not queue if the worker is already running */ 896 /* Do not queue if the worker is already running */
892 if (!(pg->flags & ALUA_PG_RUNNING)) { 897 if (!(pg->flags & ALUA_PG_RUNNING)) {
893 kref_get(&pg->kref); 898 kref_get(&pg->kref);
894 sdev = NULL;
895 start_queue = 1; 899 start_queue = 1;
896 } 900 }
897 } 901 }
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
900 alua_wq = kaluad_sync_wq; 904 alua_wq = kaluad_sync_wq;
901 spin_unlock_irqrestore(&pg->lock, flags); 905 spin_unlock_irqrestore(&pg->lock, flags);
902 906
903 if (start_queue && 907 if (start_queue) {
904 !queue_delayed_work(alua_wq, &pg->rtpg_work, 908 if (queue_delayed_work(alua_wq, &pg->rtpg_work,
905 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 909 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
906 if (sdev) 910 sdev = NULL;
907 scsi_device_put(sdev); 911 else
908 kref_put(&pg->kref, release_port_group); 912 kref_put(&pg->kref, release_port_group);
909 } 913 }
914 if (sdev)
915 scsi_device_put(sdev);
916
917 return true;
910} 918}
911 919
912/* 920/*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
1007 mutex_unlock(&h->init_mutex); 1015 mutex_unlock(&h->init_mutex);
1008 goto out; 1016 goto out;
1009 } 1017 }
1010 fn = NULL;
1011 rcu_read_unlock(); 1018 rcu_read_unlock();
1012 mutex_unlock(&h->init_mutex); 1019 mutex_unlock(&h->init_mutex);
1013 1020
1014 alua_rtpg_queue(pg, sdev, qdata, true); 1021 if (alua_rtpg_queue(pg, sdev, qdata, true))
1022 fn = NULL;
1023 else
1024 err = SCSI_DH_DEV_OFFLINED;
1015 kref_put(&pg->kref, release_port_group); 1025 kref_put(&pg->kref, release_port_group);
1016out: 1026out:
1017 if (fn) 1027 if (fn)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 524a0c755ed7..9d659aaace15 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2956 /* fill_cmd can't fail here, no data buffer to map. */ 2956 /* fill_cmd can't fail here, no data buffer to map. */
2957 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, 2957 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2958 scsi3addr, TYPE_MSG); 2958 scsi3addr, TYPE_MSG);
2959 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 2959 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2960 if (rc) { 2960 if (rc) {
2961 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 2961 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2962 goto out; 2962 goto out;
@@ -3714,7 +3714,7 @@ exit_failed:
3714 * # (integer code indicating one of several NOT READY states 3714 * # (integer code indicating one of several NOT READY states
3715 * describing why a volume is to be kept offline) 3715 * describing why a volume is to be kept offline)
3716 */ 3716 */
3717static int hpsa_volume_offline(struct ctlr_info *h, 3717static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3718 unsigned char scsi3addr[]) 3718 unsigned char scsi3addr[])
3719{ 3719{
3720 struct CommandList *c; 3720 struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
3735 DEFAULT_TIMEOUT); 3735 DEFAULT_TIMEOUT);
3736 if (rc) { 3736 if (rc) {
3737 cmd_free(h, c); 3737 cmd_free(h, c);
3738 return 0; 3738 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3739 } 3739 }
3740 sense = c->err_info->SenseInfo; 3740 sense = c->err_info->SenseInfo;
3741 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) 3741 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
3746 cmd_status = c->err_info->CommandStatus; 3746 cmd_status = c->err_info->CommandStatus;
3747 scsi_status = c->err_info->ScsiStatus; 3747 scsi_status = c->err_info->ScsiStatus;
3748 cmd_free(h, c); 3748 cmd_free(h, c);
3749 /* Is the volume 'not ready'? */
3750 if (cmd_status != CMD_TARGET_STATUS ||
3751 scsi_status != SAM_STAT_CHECK_CONDITION ||
3752 sense_key != NOT_READY ||
3753 asc != ASC_LUN_NOT_READY) {
3754 return 0;
3755 }
3756 3749
3757 /* Determine the reason for not ready state */ 3750 /* Determine the reason for not ready state */
3758 ldstat = hpsa_get_volume_status(h, scsi3addr); 3751 ldstat = hpsa_get_volume_status(h, scsi3addr);
3759 3752
3760 /* Keep volume offline in certain cases: */ 3753 /* Keep volume offline in certain cases: */
3761 switch (ldstat) { 3754 switch (ldstat) {
3755 case HPSA_LV_FAILED:
3762 case HPSA_LV_UNDERGOING_ERASE: 3756 case HPSA_LV_UNDERGOING_ERASE:
3763 case HPSA_LV_NOT_AVAILABLE: 3757 case HPSA_LV_NOT_AVAILABLE:
3764 case HPSA_LV_UNDERGOING_RPI: 3758 case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
3780 default: 3774 default:
3781 break; 3775 break;
3782 } 3776 }
3783 return 0; 3777 return HPSA_LV_OK;
3784} 3778}
3785 3779
3786/* 3780/*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3853 /* Do an inquiry to the device to see what it is. */ 3847 /* Do an inquiry to the device to see what it is. */
3854 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 3848 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3855 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 3849 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3856 /* Inquiry failed (msg printed already) */
3857 dev_err(&h->pdev->dev, 3850 dev_err(&h->pdev->dev,
3858 "hpsa_update_device_info: inquiry failed\n"); 3851 "%s: inquiry failed, device will be skipped.\n",
3859 rc = -EIO; 3852 __func__);
3853 rc = HPSA_INQUIRY_FAILED;
3860 goto bail_out; 3854 goto bail_out;
3861 } 3855 }
3862 3856
@@ -3885,15 +3879,20 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3885 if ((this_device->devtype == TYPE_DISK || 3879 if ((this_device->devtype == TYPE_DISK ||
3886 this_device->devtype == TYPE_ZBC) && 3880 this_device->devtype == TYPE_ZBC) &&
3887 is_logical_dev_addr_mode(scsi3addr)) { 3881 is_logical_dev_addr_mode(scsi3addr)) {
3888 int volume_offline; 3882 unsigned char volume_offline;
3889 3883
3890 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 3884 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3891 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3892 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3893 volume_offline = hpsa_volume_offline(h, scsi3addr); 3887 volume_offline = hpsa_volume_offline(h, scsi3addr);
3894 if (volume_offline < 0 || volume_offline > 0xff) 3888 this_device->volume_offline = volume_offline;
3895 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 3889 if (volume_offline == HPSA_LV_FAILED) {
3896 this_device->volume_offline = volume_offline & 0xff; 3890 rc = HPSA_LV_FAILED;
3891 dev_err(&h->pdev->dev,
3892 "%s: LV failed, device will be skipped.\n",
3893 __func__);
3894 goto bail_out;
3895 }
3897 } else { 3896 } else {
3898 this_device->raid_level = RAID_UNKNOWN; 3897 this_device->raid_level = RAID_UNKNOWN;
3899 this_device->offload_config = 0; 3898 this_device->offload_config = 0;
@@ -4379,8 +4378,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4379 goto out; 4378 goto out;
4380 } 4379 }
4381 if (rc) { 4380 if (rc) {
4382 dev_warn(&h->pdev->dev, 4381 h->drv_req_rescan = 1;
4383 "Inquiry failed, skipping device.\n");
4384 continue; 4382 continue;
4385 } 4383 }
4386 4384
@@ -5558,7 +5556,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
5558 5556
5559 spin_lock_irqsave(&h->scan_lock, flags); 5557 spin_lock_irqsave(&h->scan_lock, flags);
5560 h->scan_finished = 1; 5558 h->scan_finished = 1;
5561 wake_up_all(&h->scan_wait_queue); 5559 wake_up(&h->scan_wait_queue);
5562 spin_unlock_irqrestore(&h->scan_lock, flags); 5560 spin_unlock_irqrestore(&h->scan_lock, flags);
5563} 5561}
5564 5562
@@ -5576,11 +5574,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
5576 if (unlikely(lockup_detected(h))) 5574 if (unlikely(lockup_detected(h)))
5577 return hpsa_scan_complete(h); 5575 return hpsa_scan_complete(h);
5578 5576
5577 /*
5578 * If a scan is already waiting to run, no need to add another
5579 */
5580 spin_lock_irqsave(&h->scan_lock, flags);
5581 if (h->scan_waiting) {
5582 spin_unlock_irqrestore(&h->scan_lock, flags);
5583 return;
5584 }
5585
5586 spin_unlock_irqrestore(&h->scan_lock, flags);
5587
5579 /* wait until any scan already in progress is finished. */ 5588 /* wait until any scan already in progress is finished. */
5580 while (1) { 5589 while (1) {
5581 spin_lock_irqsave(&h->scan_lock, flags); 5590 spin_lock_irqsave(&h->scan_lock, flags);
5582 if (h->scan_finished) 5591 if (h->scan_finished)
5583 break; 5592 break;
5593 h->scan_waiting = 1;
5584 spin_unlock_irqrestore(&h->scan_lock, flags); 5594 spin_unlock_irqrestore(&h->scan_lock, flags);
5585 wait_event(h->scan_wait_queue, h->scan_finished); 5595 wait_event(h->scan_wait_queue, h->scan_finished);
5586 /* Note: We don't need to worry about a race between this 5596 /* Note: We don't need to worry about a race between this
@@ -5590,6 +5600,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
5590 */ 5600 */
5591 } 5601 }
5592 h->scan_finished = 0; /* mark scan as in progress */ 5602 h->scan_finished = 0; /* mark scan as in progress */
5603 h->scan_waiting = 0;
5593 spin_unlock_irqrestore(&h->scan_lock, flags); 5604 spin_unlock_irqrestore(&h->scan_lock, flags);
5594 5605
5595 if (unlikely(lockup_detected(h))) 5606 if (unlikely(lockup_detected(h)))
@@ -8792,6 +8803,7 @@ reinit_after_soft_reset:
8792 init_waitqueue_head(&h->event_sync_wait_queue); 8803 init_waitqueue_head(&h->event_sync_wait_queue);
8793 mutex_init(&h->reset_mutex); 8804 mutex_init(&h->reset_mutex);
8794 h->scan_finished = 1; /* no scan currently in progress */ 8805 h->scan_finished = 1; /* no scan currently in progress */
8806 h->scan_waiting = 0;
8795 8807
8796 pci_set_drvdata(pdev, h); 8808 pci_set_drvdata(pdev, h);
8797 h->ndevices = 0; 8809 h->ndevices = 0;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index bf6cdc106654..6f04f2ad4125 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -201,6 +201,7 @@ struct ctlr_info {
201 dma_addr_t errinfo_pool_dhandle; 201 dma_addr_t errinfo_pool_dhandle;
202 unsigned long *cmd_pool_bits; 202 unsigned long *cmd_pool_bits;
203 int scan_finished; 203 int scan_finished;
204 u8 scan_waiting : 1;
204 spinlock_t scan_lock; 205 spinlock_t scan_lock;
205 wait_queue_head_t scan_wait_queue; 206 wait_queue_head_t scan_wait_queue;
206 207
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf07058..5961705eef76 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
156#define CFGTBL_BusType_Fibre2G 0x00000200l 156#define CFGTBL_BusType_Fibre2G 0x00000200l
157 157
158/* VPD Inquiry types */ 158/* VPD Inquiry types */
159#define HPSA_INQUIRY_FAILED 0x02
159#define HPSA_VPD_SUPPORTED_PAGES 0x00 160#define HPSA_VPD_SUPPORTED_PAGES 0x00
160#define HPSA_VPD_LV_DEVICE_ID 0x83 161#define HPSA_VPD_LV_DEVICE_ID 0x83
161#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 162#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
@@ -166,6 +167,7 @@
166/* Logical volume states */ 167/* Logical volume states */
167#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff 168#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
168#define HPSA_LV_OK 0x0 169#define HPSA_LV_OK 0x0
170#define HPSA_LV_FAILED 0x01
169#define HPSA_LV_NOT_AVAILABLE 0x0b 171#define HPSA_LV_NOT_AVAILABLE 0x0b
170#define HPSA_LV_UNDERGOING_ERASE 0x0F 172#define HPSA_LV_UNDERGOING_ERASE 0x0F
171#define HPSA_LV_UNDERGOING_RPI 0x12 173#define HPSA_LV_UNDERGOING_RPI 0x12
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b29afafc2885..5d5e272fd815 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6293 break; 6293 break;
6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6296 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6296 /*
6297 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6298 * so SCSI mid-layer and upper layers handle it accordingly.
6299 */
6300 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6301 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6297 break; 6302 break;
6298 case IPR_IOASC_BUS_WAS_RESET: 6303 case IPR_IOASC_BUS_WAS_RESET:
6299 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6304 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012fdeca..87f5e694dbed 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
221 task->num_scatter = qc->n_elem; 221 task->num_scatter = qc->n_elem;
222 } else { 222 } else {
223 for_each_sg(qc->sg, sg, qc->n_elem, si) 223 for_each_sg(qc->sg, sg, qc->n_elem, si)
224 xfer += sg->length; 224 xfer += sg_dma_len(sg);
225 225
226 task->total_xfer_len = xfer; 226 task->total_xfer_len = xfer;
227 task->num_scatter = si; 227 task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c3be3e6f5e2..22819afbaef5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
3315 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3315 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3316 * Supported Values: 1 - register just FCP 3316 * Supported Values: 1 - register just FCP
3317 * 3 - register both FCP and NVME 3317 * 3 - register both FCP and NVME
3318 * Supported values are [1,3]. Default value is 3 3318 * Supported values are [1,3]. Default value is 1
3319 */ 3319 */
3320LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, 3320LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
3321 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, 3321 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3322 "Define fc4 type to register with fabric."); 3322 "Define fc4 type to register with fabric.");
3323 3323
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index c05f56c3023f..7b7d314af0e0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,14 +44,6 @@
44/* hbqinfo output buffer size */ 44/* hbqinfo output buffer size */
45#define LPFC_HBQINFO_SIZE 8192 45#define LPFC_HBQINFO_SIZE 8192
46 46
47enum {
48 DUMP_FCP,
49 DUMP_NVME,
50 DUMP_MBX,
51 DUMP_ELS,
52 DUMP_NVMELS,
53};
54
55/* nvmestat output buffer size */ 47/* nvmestat output buffer size */
56#define LPFC_NVMESTAT_SIZE 8192 48#define LPFC_NVMESTAT_SIZE 8192
57#define LPFC_NVMEKTIME_SIZE 8192 49#define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
283 struct lpfc_idiag_offset offset; 275 struct lpfc_idiag_offset offset;
284 void *ptr_private; 276 void *ptr_private;
285}; 277};
278
279#else
280
281#define lpfc_nvmeio_data(phba, fmt, arg...) \
282 no_printk(fmt, ##arg)
283
286#endif 284#endif
287 285
286enum {
287 DUMP_FCP,
288 DUMP_NVME,
289 DUMP_MBX,
290 DUMP_ELS,
291 DUMP_NVMELS,
292};
293
288/* Mask for discovery_trace */ 294/* Mask for discovery_trace */
289#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ 295#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
290#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ 296#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d9c61d030034..a5ca37e45fb6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7968 did, vport->port_state, ndlp->nlp_flag); 7968 did, vport->port_state, ndlp->nlp_flag);
7969 7969
7970 phba->fc_stat.elsRcvPRLI++; 7970 phba->fc_stat.elsRcvPRLI++;
7971 if (vport->port_state < LPFC_DISC_AUTH) { 7971 if ((vport->port_state < LPFC_DISC_AUTH) &&
7972 (vport->fc_flag & FC_FABRIC)) {
7972 rjt_err = LSRJT_UNABLE_TPC; 7973 rjt_err = LSRJT_UNABLE_TPC;
7973 rjt_exp = LSEXP_NOTHING_MORE; 7974 rjt_exp = LSEXP_NOTHING_MORE;
7974 break; 7975 break;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2697d49da4d7..6cc561b04211 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5891 /* Check to see if it matches any module parameter */ 5891 /* Check to see if it matches any module parameter */
5892 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5892 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
5893 if (wwn == lpfc_enable_nvmet[i]) { 5893 if (wwn == lpfc_enable_nvmet[i]) {
5894#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
5894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5895 "6017 NVME Target %016llx\n", 5896 "6017 NVME Target %016llx\n",
5896 wwn); 5897 wwn);
5897 phba->nvmet_support = 1; /* a match */ 5898 phba->nvmet_support = 1; /* a match */
5899#else
5900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5901 "6021 Can't enable NVME Target."
5902 " NVME_TARGET_FC infrastructure"
5903 " is not in kernel\n");
5904#endif
5898 } 5905 }
5899 } 5906 }
5900 } 5907 }
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0a4c19081409..0024de1c6c1f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2149 /* localport is allocated from the stack, but the registration 2149 /* localport is allocated from the stack, but the registration
2150 * call allocates heap memory as well as the private area. 2150 * call allocates heap memory as well as the private area.
2151 */ 2151 */
2152#ifdef CONFIG_LPFC_NVME_INITIATOR 2152#if (IS_ENABLED(CONFIG_NVME_FC))
2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2154 &vport->phba->pcidev->dev, &localport); 2154 &vport->phba->pcidev->dev, &localport);
2155#else 2155#else
@@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2190void 2190void
2191lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2191lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2192{ 2192{
2193#ifdef CONFIG_LPFC_NVME_INITIATOR 2193#if (IS_ENABLED(CONFIG_NVME_FC))
2194 struct nvme_fc_local_port *localport; 2194 struct nvme_fc_local_port *localport;
2195 struct lpfc_nvme_lport *lport; 2195 struct lpfc_nvme_lport *lport;
2196 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; 2196 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
2274int 2274int
2275lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2275lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2276{ 2276{
2277#ifdef CONFIG_LPFC_NVME_INITIATOR 2277#if (IS_ENABLED(CONFIG_NVME_FC))
2278 int ret = 0; 2278 int ret = 0;
2279 struct nvme_fc_local_port *localport; 2279 struct nvme_fc_local_port *localport;
2280 struct lpfc_nvme_lport *lport; 2280 struct lpfc_nvme_lport *lport;
@@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2403void 2403void
2404lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2404lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2405{ 2405{
2406#ifdef CONFIG_LPFC_NVME_INITIATOR 2406#if (IS_ENABLED(CONFIG_NVME_FC))
2407 int ret; 2407 int ret;
2408 struct nvme_fc_local_port *localport; 2408 struct nvme_fc_local_port *localport;
2409 struct lpfc_nvme_lport *lport; 2409 struct lpfc_nvme_lport *lport;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b7739a554fe0..acba1b67e505 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
520 struct lpfc_hba *phba = ctxp->phba; 520 struct lpfc_hba *phba = ctxp->phba;
521 struct lpfc_iocbq *nvmewqeq; 521 struct lpfc_iocbq *nvmewqeq;
522 unsigned long iflags; 522 unsigned long iflags;
523 int rc, id; 523 int rc;
524 524
525#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 525#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
526 if (phba->ktime_on) { 526 if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
530 ctxp->ts_nvme_data = ktime_get_ns(); 530 ctxp->ts_nvme_data = ktime_get_ns();
531 } 531 }
532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
533 id = smp_processor_id(); 533 int id = smp_processor_id();
534 ctxp->cpu = id; 534 ctxp->cpu = id;
535 if (id < LPFC_CHECK_CPU_CNT) 535 if (id < LPFC_CHECK_CPU_CNT)
536 phba->cpucheck_xmt_io[id]++; 536 phba->cpucheck_xmt_io[id]++;
@@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
671 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 671 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
672 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; 672 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
673 673
674#ifdef CONFIG_LPFC_NVME_TARGET 674#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
675 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 675 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
676 &phba->pcidev->dev, 676 &phba->pcidev->dev,
677 &phba->targetport); 677 &phba->targetport);
@@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
756void 756void
757lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 757lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
758{ 758{
759#ifdef CONFIG_LPFC_NVME_TARGET 759#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
760 struct lpfc_nvmet_tgtport *tgtp; 760 struct lpfc_nvmet_tgtport *tgtp;
761 761
762 if (phba->nvmet_support == 0) 762 if (phba->nvmet_support == 0)
@@ -788,7 +788,7 @@ static void
788lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 788lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
789 struct hbq_dmabuf *nvmebuf) 789 struct hbq_dmabuf *nvmebuf)
790{ 790{
791#ifdef CONFIG_LPFC_NVME_TARGET 791#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
792 struct lpfc_nvmet_tgtport *tgtp; 792 struct lpfc_nvmet_tgtport *tgtp;
793 struct fc_frame_header *fc_hdr; 793 struct fc_frame_header *fc_hdr;
794 struct lpfc_nvmet_rcv_ctx *ctxp; 794 struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
891 struct rqb_dmabuf *nvmebuf, 891 struct rqb_dmabuf *nvmebuf,
892 uint64_t isr_timestamp) 892 uint64_t isr_timestamp)
893{ 893{
894#ifdef CONFIG_LPFC_NVME_TARGET 894#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
895 struct lpfc_nvmet_rcv_ctx *ctxp; 895 struct lpfc_nvmet_rcv_ctx *ctxp;
896 struct lpfc_nvmet_tgtport *tgtp; 896 struct lpfc_nvmet_tgtport *tgtp;
897 struct fc_frame_header *fc_hdr; 897 struct fc_frame_header *fc_hdr;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e7e5974e1a2c..2b209bbb4c91 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "07.701.16.00-rc1" 38#define MEGASAS_VERSION "07.701.17.00-rc1"
39#define MEGASAS_RELDATE "February 2, 2017" 39#define MEGASAS_RELDATE "March 2, 2017"
40 40
41/* 41/*
42 * Device IDs 42 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7ac9a9ee9bd4..0016f12cc563 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1963,6 +1963,9 @@ scan_target:
1963 if (!mr_device_priv_data) 1963 if (!mr_device_priv_data)
1964 return -ENOMEM; 1964 return -ENOMEM;
1965 sdev->hostdata = mr_device_priv_data; 1965 sdev->hostdata = mr_device_priv_data;
1966
1967 atomic_set(&mr_device_priv_data->r1_ldio_hint,
1968 instance->r1_ldio_hint_default);
1966 return 0; 1969 return 0;
1967} 1970}
1968 1971
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5034 &instance->irq_context[j]); 5037 &instance->irq_context[j]);
5035 /* Retry irq register for IO_APIC*/ 5038 /* Retry irq register for IO_APIC*/
5036 instance->msix_vectors = 0; 5039 instance->msix_vectors = 0;
5037 if (is_probe) 5040 if (is_probe) {
5041 pci_free_irq_vectors(instance->pdev);
5038 return megasas_setup_irqs_ioapic(instance); 5042 return megasas_setup_irqs_ioapic(instance);
5039 else 5043 } else {
5040 return -1; 5044 return -1;
5045 }
5041 } 5046 }
5042 } 5047 }
5043 return 0; 5048 return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
5277 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5282 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5278 } 5283 }
5279 5284
5280 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5285 if (!instance->msix_vectors) {
5281 if (i < 0) 5286 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5282 goto fail_setup_irqs; 5287 if (i < 0)
5288 goto fail_setup_irqs;
5289 }
5283 5290
5284 dev_info(&instance->pdev->dev, 5291 dev_info(&instance->pdev->dev,
5285 "firmware supports msix\t: (%d)", fw_msix_count); 5292 "firmware supports msix\t: (%d)", fw_msix_count);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 29650ba669da..f990ab4d45e1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
2159 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2159 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2160 2160
2161 if (is_stream_detected(rctx_g35) && 2161 if (is_stream_detected(rctx_g35) &&
2162 (raid->level == 5) && 2162 ((raid->level == 5) || (raid->level == 6)) &&
2163 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && 2163 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2164 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) 2164 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
2165 cpu_sel = MR_RAID_CTX_CPUSEL_0; 2165 cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2338 fp_possible = false; 2338 fp_possible = false;
2339 atomic_dec(&instance->fw_outstanding); 2339 atomic_dec(&instance->fw_outstanding);
2340 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2340 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
2341 atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { 2341 (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
2342 fp_possible = false; 2342 fp_possible = false;
2343 atomic_dec(&instance->fw_outstanding); 2343 atomic_dec(&instance->fw_outstanding);
2344 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) 2344 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index ed58b9104f58..e10b91cc3c62 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
99 qedf_set_vlan_id(qedf, vid); 99 qedf_set_vlan_id(qedf, vid);
100 100
101 /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ 101 /* Inform waiter that it's ok to call fcoe_ctlr_link up() */
102 complete(&qedf->fipvlan_compl); 102 if (!completion_done(&qedf->fipvlan_compl))
103 complete(&qedf->fipvlan_compl);
103 } 104 }
104} 105}
105 106
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 8e2a160490e6..cceddd995a4b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2803 atomic_set(&qedf->num_offloads, 0); 2803 atomic_set(&qedf->num_offloads, 0);
2804 qedf->stop_io_on_error = false; 2804 qedf->stop_io_on_error = false;
2805 pci_set_drvdata(pdev, qedf); 2805 pci_set_drvdata(pdev, qedf);
2806 init_completion(&qedf->fipvlan_compl);
2806 2807
2807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, 2808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
2808 "QLogic FastLinQ FCoE Module qedf %s, " 2809 "QLogic FastLinQ FCoE Module qedf %s, "
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 8e3d92807cb8..92775a8b74b1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
2007 2007
2008static struct pci_device_id qedi_pci_tbl[] = { 2008static struct pci_device_id qedi_pci_tbl[] = {
2009 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, 2009 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
2010 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
2010 { 0 }, 2011 { 0 },
2011}; 2012};
2012MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); 2013MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41d5b09f7326..83d61d2142e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1160,8 +1160,13 @@ static inline
1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1160uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1161{ 1161{
1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1163 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1163 1164
1164 return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); 1165 if (IS_P3P_TYPE(ha))
1166 return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1167 else
1168 return ((RD_REG_DWORD(&reg->host_status)) ==
1169 ISP_REG_DISCONNECT);
1165} 1170}
1166 1171
1167/************************************************************************** 1172/**************************************************************************
@@ -1651,7 +1656,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1651 /* Don't abort commands in adapter during EEH 1656 /* Don't abort commands in adapter during EEH
1652 * recovery as it's not accessible/responding. 1657 * recovery as it's not accessible/responding.
1653 */ 1658 */
1654 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { 1659 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
1660 (sp->type == SRB_SCSI_CMD)) {
1655 /* Get a reference to the sp and drop the lock. 1661 /* Get a reference to the sp and drop the lock.
1656 * The reference ensures this sp->done() call 1662 * The reference ensures this sp->done() call
1657 * - and not the call in qla2xxx_eh_abort() - 1663 * - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 19125d72f322..e5a2d590a104 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
496 scsi_starved_list_run(sdev->host); 496 scsi_starved_list_run(sdev->host);
497 497
498 if (q->mq_ops) 498 if (q->mq_ops)
499 blk_mq_start_stopped_hw_queues(q, false); 499 blk_mq_run_hw_queues(q, false);
500 else 500 else
501 blk_run_queue(q); 501 blk_run_queue(q);
502} 502}
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
667 !list_empty(&sdev->host->starved_list)) 667 !list_empty(&sdev->host->starved_list))
668 kblockd_schedule_work(&sdev->requeue_work); 668 kblockd_schedule_work(&sdev->requeue_work);
669 else 669 else
670 blk_mq_start_stopped_hw_queues(q, true); 670 blk_mq_run_hw_queues(q, true);
671 } else { 671 } else {
672 unsigned long flags; 672 unsigned long flags;
673 673
@@ -1974,7 +1974,7 @@ out:
1974 case BLK_MQ_RQ_QUEUE_BUSY: 1974 case BLK_MQ_RQ_QUEUE_BUSY:
1975 if (atomic_read(&sdev->device_busy) == 0 && 1975 if (atomic_read(&sdev->device_busy) == 0 &&
1976 !scsi_device_blocked(sdev)) 1976 !scsi_device_blocked(sdev))
1977 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); 1977 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
1978 break; 1978 break;
1979 case BLK_MQ_RQ_QUEUE_ERROR: 1979 case BLK_MQ_RQ_QUEUE_ERROR:
1980 /* 1980 /*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fcfeddc79331..35ad5e8a31ab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2102 2102
2103#define READ_CAPACITY_RETRIES_ON_RESET 10 2103#define READ_CAPACITY_RETRIES_ON_RESET 10
2104 2104
2105/*
2106 * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
2107 * and the reported logical block size is bigger than 512 bytes. Note
2108 * that last_sector is a u64 and therefore logical_to_sectors() is not
2109 * applicable.
2110 */
2111static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
2112{
2113 u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
2114
2115 if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
2116 return false;
2117
2118 return true;
2119}
2120
2105static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2121static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2106 unsigned char *buffer) 2122 unsigned char *buffer)
2107{ 2123{
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2167 return -ENODEV; 2183 return -ENODEV;
2168 } 2184 }
2169 2185
2170 if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { 2186 if (!sd_addressable_capacity(lba, sector_size)) {
2171 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2187 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2172 "kernel compiled with support for large block " 2188 "kernel compiled with support for large block "
2173 "devices.\n"); 2189 "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2256 return sector_size; 2272 return sector_size;
2257 } 2273 }
2258 2274
2259 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { 2275 if (!sd_addressable_capacity(lba, sector_size)) {
2260 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2276 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2261 "kernel compiled with support for large block " 2277 "kernel compiled with support for large block "
2262 "devices.\n"); 2278 "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
2956 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 2972 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2957 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 2973 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2958 } else 2974 } else
2959 rw_max = BLK_DEF_MAX_SECTORS; 2975 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
2976 (sector_t)BLK_DEF_MAX_SECTORS);
2960 2977
2961 /* Combine with controller limits */ 2978 /* Combine with controller limits */
2962 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 2979 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29b86505f796..225abaad4d1c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
996 result = get_user(val, ip); 996 result = get_user(val, ip);
997 if (result) 997 if (result)
998 return result; 998 return result;
999 if (val > SG_MAX_CDB_SIZE)
1000 return -ENOMEM;
999 sfp->next_cmd_len = (val > 0) ? val : 0; 1001 sfp->next_cmd_len = (val > 0) ? val : 0;
1000 return 0; 1002 return 0;
1001 case SG_GET_VERSION_NUM: 1003 case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0b29b9329b1c..a8f630213a1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
836 unsigned char *buffer; 836 unsigned char *buffer;
837 struct scsi_mode_data data; 837 struct scsi_mode_data data;
838 struct scsi_sense_hdr sshdr; 838 struct scsi_sense_hdr sshdr;
839 unsigned int ms_len = 128;
839 int rc, n; 840 int rc, n;
840 841
841 static const char *loadmech[] = 842 static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
862 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); 863 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
863 864
864 /* ask for mode page 0x2a */ 865 /* ask for mode page 0x2a */
865 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 866 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
866 SR_TIMEOUT, 3, &data, NULL); 867 SR_TIMEOUT, 3, &data, NULL);
867 868
868 if (!scsi_status_is_good(rc)) { 869 if (!scsi_status_is_good(rc) || data.length > ms_len ||
870 data.header_length + data.block_descriptor_length > data.length) {
869 /* failed, drive doesn't have capabilities mode page */ 871 /* failed, drive doesn't have capabilities mode page */
870 cd->cdi.speed = 1; 872 cd->cdi.speed = 1;
871 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | 873 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..8e5e6c04c035 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
309 309
310 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 310 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
311 mmio_base = devm_ioremap_resource(dev, mem_res); 311 mmio_base = devm_ioremap_resource(dev, mem_res);
312 if (IS_ERR(*(void **)&mmio_base)) { 312 if (IS_ERR(mmio_base)) {
313 err = PTR_ERR(*(void **)&mmio_base); 313 err = PTR_ERR(mmio_base);
314 goto out; 314 goto out;
315 } 315 }
316 316
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1359913bf840..096e95b911bd 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4662,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4662 } 4662 }
4663 if (ufshcd_is_clkscaling_supported(hba)) 4663 if (ufshcd_is_clkscaling_supported(hba))
4664 hba->clk_scaling.active_reqs--; 4664 hba->clk_scaling.active_reqs--;
4665 if (ufshcd_is_clkscaling_supported(hba))
4666 hba->clk_scaling.active_reqs--;
4667 } 4665 }
4668 4666
4669 /* clear corresponding bits of completed commands */ 4667 /* clear corresponding bits of completed commands */
@@ -7642,7 +7640,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
7642 if (kstrtoul(buf, 0, &value)) 7640 if (kstrtoul(buf, 0, &value))
7643 return -EINVAL; 7641 return -EINVAL;
7644 7642
7645 if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) 7643 if (value >= UFS_PM_LVL_MAX)
7646 return -EINVAL; 7644 return -EINVAL;
7647 7645
7648 spin_lock_irqsave(hba->host->host_lock, flags); 7646 spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7cbad0d45b9c..6ba270e0494d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
409 ret = PTR_ERR(vmfile); 409 ret = PTR_ERR(vmfile);
410 goto out; 410 goto out;
411 } 411 }
412 vmfile->f_mode |= FMODE_LSEEK;
412 asma->file = vmfile; 413 asma->file = vmfile;
413 } 414 }
414 get_file(asma->file); 415 get_file(asma->file);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a91802432f2f..e3f9ed3690b7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
485 485
486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
487{ 487{
488 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 488 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
489 return 0;
490} 489}
491EXPORT_SYMBOL(iscsit_queue_rsp); 490EXPORT_SYMBOL(iscsit_queue_rsp);
492 491
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bf40f03755dd..344e8448869c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
1398static int lio_queue_data_in(struct se_cmd *se_cmd) 1398static int lio_queue_data_in(struct se_cmd *se_cmd)
1399{ 1399{
1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1401 struct iscsi_conn *conn = cmd->conn;
1401 1402
1402 cmd->i_state = ISTATE_SEND_DATAIN; 1403 cmd->i_state = ISTATE_SEND_DATAIN;
1403 cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); 1404 return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
1404
1405 return 0;
1406} 1405}
1407 1406
1408static int lio_write_pending(struct se_cmd *se_cmd) 1407static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
1431static int lio_queue_status(struct se_cmd *se_cmd) 1430static int lio_queue_status(struct se_cmd *se_cmd)
1432{ 1431{
1433 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1432 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1433 struct iscsi_conn *conn = cmd->conn;
1434 1434
1435 cmd->i_state = ISTATE_SEND_STATUS; 1435 cmd->i_state = ISTATE_SEND_STATUS;
1436 1436
1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) { 1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
1438 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1438 return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1439 return 0;
1440 } 1439 }
1441 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1440 return conn->conn_transport->iscsit_queue_status(conn, cmd);
1442
1443 return 0;
1444} 1441}
1445 1442
1446static void lio_queue_tm_rsp(struct se_cmd *se_cmd) 1443static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e65bf78ceef3..fce627628200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param); 783 SET_PSTATE_REPLY_OPTIONAL(param);
784 /* 784 /*
785 * The GlobalSAN iSCSI Initiator for MacOSX does
786 * not respond to MaxBurstLength, FirstBurstLength,
787 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
788 * So, we set them to 'reply optional' here, and assume the
789 * the defaults from iscsi_parameters.h if the initiator
790 * is not RFC compliant and the keys are not negotiated.
791 */
792 if (!strcmp(param->name, MAXBURSTLENGTH))
793 SET_PSTATE_REPLY_OPTIONAL(param);
794 if (!strcmp(param->name, FIRSTBURSTLENGTH))
795 SET_PSTATE_REPLY_OPTIONAL(param);
796 if (!strcmp(param->name, DEFAULTTIME2WAIT))
797 SET_PSTATE_REPLY_OPTIONAL(param);
798 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
799 SET_PSTATE_REPLY_OPTIONAL(param);
800 /*
801 * Required for gPXE iSCSI boot client 785 * Required for gPXE iSCSI boot client
802 */ 786 */
803 if (!strcmp(param->name, MAXCONNECTIONS)) 787 if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5041a9c8bdcb..7d3e2fcc26a0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
567 } 567 }
568} 568}
569 569
570void iscsit_add_cmd_to_response_queue( 570int iscsit_add_cmd_to_response_queue(
571 struct iscsi_cmd *cmd, 571 struct iscsi_cmd *cmd,
572 struct iscsi_conn *conn, 572 struct iscsi_conn *conn,
573 u8 state) 573 u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
578 if (!qr) { 578 if (!qr) {
579 pr_err("Unable to allocate memory for" 579 pr_err("Unable to allocate memory for"
580 " struct iscsi_queue_req\n"); 580 " struct iscsi_queue_req\n");
581 return; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&qr->qr_list); 583 INIT_LIST_HEAD(&qr->qr_list);
584 qr->cmd = cmd; 584 qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
590 spin_unlock_bh(&conn->response_queue_lock); 590 spin_unlock_bh(&conn->response_queue_lock);
591 591
592 wake_up(&conn->queues_wq); 592 wake_up(&conn->queues_wq);
593 return 0;
593} 594}
594 595
595struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 596struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
737{ 738{
738 struct se_cmd *se_cmd = NULL; 739 struct se_cmd *se_cmd = NULL;
739 int rc; 740 int rc;
741 bool op_scsi = false;
740 /* 742 /*
741 * Determine if a struct se_cmd is associated with 743 * Determine if a struct se_cmd is associated with
742 * this struct iscsi_cmd. 744 * this struct iscsi_cmd.
743 */ 745 */
744 switch (cmd->iscsi_opcode) { 746 switch (cmd->iscsi_opcode) {
745 case ISCSI_OP_SCSI_CMD: 747 case ISCSI_OP_SCSI_CMD:
746 se_cmd = &cmd->se_cmd; 748 op_scsi = true;
747 __iscsit_free_cmd(cmd, true, shutdown);
748 /* 749 /*
749 * Fallthrough 750 * Fallthrough
750 */ 751 */
751 case ISCSI_OP_SCSI_TMFUNC: 752 case ISCSI_OP_SCSI_TMFUNC:
752 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); 753 se_cmd = &cmd->se_cmd;
753 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 754 __iscsit_free_cmd(cmd, op_scsi, shutdown);
754 __iscsit_free_cmd(cmd, true, shutdown); 755 rc = transport_generic_free_cmd(se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, op_scsi, shutdown);
755 target_put_sess_cmd(se_cmd); 758 target_put_sess_cmd(se_cmd);
756 } 759 }
757 break; 760 break;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 8ff08856516a..9e4197af8708 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
31 struct iscsi_conn_recovery **, itt_t); 31 struct iscsi_conn_recovery **, itt_t);
32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); 33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
34extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 34extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fd7c16a7ca6e..fc4a9c303d55 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
197 /* 197 /*
198 * Set the ASYMMETRIC ACCESS State 198 * Set the ASYMMETRIC ACCESS State
199 */ 199 */
200 buf[off++] |= (atomic_read( 200 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
201 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
202 /* 201 /*
203 * Set supported ASYMMETRIC ACCESS State bits 202 * Set supported ASYMMETRIC ACCESS State bits
204 */ 203 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
710 709
711 spin_lock(&lun->lun_tg_pt_gp_lock); 710 spin_lock(&lun->lun_tg_pt_gp_lock);
712 tg_pt_gp = lun->lun_tg_pt_gp; 711 tg_pt_gp = lun->lun_tg_pt_gp;
713 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 712 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
714 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 713 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
715 714
716 // XXX: keeps using tg_pt_gp witout reference after unlock 715 // XXX: keeps using tg_pt_gp witout reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
911} 910}
912 911
913/* 912/*
914 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 913 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
915 */ 914 */
916static int core_alua_update_tpg_primary_metadata( 915static int core_alua_update_tpg_primary_metadata(
917 struct t10_alua_tg_pt_gp *tg_pt_gp) 916 struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
934 "alua_access_state=0x%02x\n" 933 "alua_access_state=0x%02x\n"
935 "alua_access_status=0x%02x\n", 934 "alua_access_status=0x%02x\n",
936 tg_pt_gp->tg_pt_gp_id, 935 tg_pt_gp->tg_pt_gp_id,
937 tg_pt_gp->tg_pt_gp_alua_pending_state, 936 tg_pt_gp->tg_pt_gp_alua_access_state,
938 tg_pt_gp->tg_pt_gp_alua_access_status); 937 tg_pt_gp->tg_pt_gp_alua_access_status);
939 938
940 snprintf(path, ALUA_METADATA_PATH_LEN, 939 snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
1013 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1012 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1014} 1013}
1015 1014
1016static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1017{
1018 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
1019 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
1020 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1021 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
1022 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
1023
1024 /*
1025 * Update the ALUA metadata buf that has been allocated in
1026 * core_alua_do_port_transition(), this metadata will be written
1027 * to struct file.
1028 *
1029 * Note that there is the case where we do not want to update the
1030 * metadata when the saved metadata is being parsed in userspace
1031 * when setting the existing port access state and access status.
1032 *
1033 * Also note that the failure to write out the ALUA metadata to
1034 * struct file does NOT affect the actual ALUA transition.
1035 */
1036 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1037 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1038 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1039 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1040 }
1041 /*
1042 * Set the current primary ALUA access state to the requested new state
1043 */
1044 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1045 tg_pt_gp->tg_pt_gp_alua_pending_state);
1046
1047 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1048 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1049 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1050 tg_pt_gp->tg_pt_gp_id,
1051 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1052 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1053
1054 core_alua_queue_state_change_ua(tg_pt_gp);
1055
1056 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1057 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1058 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1059
1060 if (tg_pt_gp->tg_pt_gp_transition_complete)
1061 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1062}
1063
1064static int core_alua_do_transition_tg_pt( 1015static int core_alua_do_transition_tg_pt(
1065 struct t10_alua_tg_pt_gp *tg_pt_gp, 1016 struct t10_alua_tg_pt_gp *tg_pt_gp,
1066 int new_state, 1017 int new_state,
1067 int explicit) 1018 int explicit)
1068{ 1019{
1069 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1020 int prev_state;
1070 DECLARE_COMPLETION_ONSTACK(wait);
1071 1021
1022 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1072 /* Nothing to be done here */ 1023 /* Nothing to be done here */
1073 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 1024 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1025 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1074 return 0; 1026 return 0;
1027 }
1075 1028
1076 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) 1029 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1030 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1077 return -EAGAIN; 1031 return -EAGAIN;
1078 1032 }
1079 /*
1080 * Flush any pending transitions
1081 */
1082 if (!explicit)
1083 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1084 1033
1085 /* 1034 /*
1086 * Save the old primary ALUA access state, and set the current state 1035 * Save the old primary ALUA access state, and set the current state
1087 * to ALUA_ACCESS_STATE_TRANSITION. 1036 * to ALUA_ACCESS_STATE_TRANSITION.
1088 */ 1037 */
1089 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1038 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1090 ALUA_ACCESS_STATE_TRANSITION); 1039 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1091 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 1040 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1092 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1041 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1093 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1042 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1094 1043
1095 core_alua_queue_state_change_ua(tg_pt_gp); 1044 core_alua_queue_state_change_ua(tg_pt_gp);
1096 1045
1097 if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1046 if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1047 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1098 return 0; 1048 return 0;
1099 1049 }
1100 tg_pt_gp->tg_pt_gp_alua_previous_state =
1101 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1102 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1103 1050
1104 /* 1051 /*
1105 * Check for the optional ALUA primary state transition delay 1052 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
1108 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1055 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1109 1056
1110 /* 1057 /*
1111 * Take a reference for workqueue item 1058 * Set the current primary ALUA access state to the requested new state
1112 */ 1059 */
1113 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1114 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1115 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1116 1061
1117 schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); 1062 /*
1118 if (explicit) { 1063 * Update the ALUA metadata buf that has been allocated in
1119 tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1064 * core_alua_do_port_transition(), this metadata will be written
1120 wait_for_completion(&wait); 1065 * to struct file.
1121 tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1066 *
1067 * Note that there is the case where we do not want to update the
1068 * metadata when the saved metadata is being parsed in userspace
1069 * when setting the existing port access state and access status.
1070 *
1071 * Also note that the failure to write out the ALUA metadata to
1072 * struct file does NOT affect the actual ALUA transition.
1073 */
1074 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1075 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1122 } 1076 }
1123 1077
1078 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1079 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1080 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1081 tg_pt_gp->tg_pt_gp_id,
1082 core_alua_dump_state(prev_state),
1083 core_alua_dump_state(new_state));
1084
1085 core_alua_queue_state_change_ua(tg_pt_gp);
1086
1087 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1124 return 0; 1088 return 0;
1125} 1089}
1126 1090
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1685 } 1649 }
1686 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1650 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1687 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1651 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1688 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1652 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1689 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1653 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1690 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1654 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1691 INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1692 core_alua_do_transition_tg_pt_work);
1693 tg_pt_gp->tg_pt_gp_dev = dev; 1655 tg_pt_gp->tg_pt_gp_dev = dev;
1694 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1656 tg_pt_gp->tg_pt_gp_alua_access_state =
1695 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1657 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1696 /* 1658 /*
1697 * Enable both explicit and implicit ALUA support by default 1659 * Enable both explicit and implicit ALUA support by default
1698 */ 1660 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
1797 dev->t10_alua.alua_tg_pt_gps_counter--; 1759 dev->t10_alua.alua_tg_pt_gps_counter--;
1798 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1760 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1799 1761
1800 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1801
1802 /* 1762 /*
1803 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1763 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1804 * core_alua_get_tg_pt_gp_by_name() in 1764 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1938 "Primary Access Status: %s\nTG Port Secondary Access" 1898 "Primary Access Status: %s\nTG Port Secondary Access"
1939 " State: %s\nTG Port Secondary Access Status: %s\n", 1899 " State: %s\nTG Port Secondary Access Status: %s\n",
1940 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1941 core_alua_dump_state(atomic_read( 1901 core_alua_dump_state(
1942 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1902 tg_pt_gp->tg_pt_gp_alua_access_state),
1943 core_alua_dump_status( 1903 core_alua_dump_status(
1944 tg_pt_gp->tg_pt_gp_alua_access_status), 1904 tg_pt_gp->tg_pt_gp_alua_access_status),
1945 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 38b5025e4c7a..70657fd56440 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2392 char *page) 2392 char *page)
2393{ 2393{
2394 return sprintf(page, "%d\n", 2394 return sprintf(page, "%d\n",
2395 atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); 2395 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2396} 2396}
2397 2397
2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d8a16ca6baa5..d1e6cab8e3d3 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
92 pr_err("Source se_lun->lun_se_dev does not exist\n"); 92 pr_err("Source se_lun->lun_se_dev does not exist\n");
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 if (lun->lun_shutdown) {
96 pr_err("Unable to create mappedlun symlink because"
97 " lun->lun_shutdown=true\n");
98 return -EINVAL;
99 }
95 se_tpg = lun->lun_tpg; 100 se_tpg = lun->lun_tpg;
96 101
97 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 6fb191914f45..dfaef4d3b2d2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
642 */ 642 */
643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
644 644
645 lun->lun_shutdown = true;
646
645 core_clear_lun_from_tpg(lun, tpg); 647 core_clear_lun_from_tpg(lun, tpg);
646 /* 648 /*
647 * Wait for any active I/O references to percpu se_lun->lun_ref to 649 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
663 } 665 }
664 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 666 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
665 hlist_del_rcu(&lun->link); 667 hlist_del_rcu(&lun->link);
668
669 lun->lun_shutdown = false;
666 mutex_unlock(&tpg->tpg_lun_mutex); 670 mutex_unlock(&tpg->tpg_lun_mutex);
667 671
668 percpu_ref_exit(&lun->lun_ref); 672 percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b1a3cdb29468..a0cd56ee5fe9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
64struct kmem_cache *t10_alua_lba_map_mem_cache; 64struct kmem_cache *t10_alua_lba_map_mem_cache;
65 65
66static void transport_complete_task_attr(struct se_cmd *cmd); 66static void transport_complete_task_attr(struct se_cmd *cmd);
67static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
67static void transport_handle_queue_full(struct se_cmd *cmd, 68static void transport_handle_queue_full(struct se_cmd *cmd,
68 struct se_device *dev); 69 struct se_device *dev, int err, bool write_pending);
69static int transport_put_cmd(struct se_cmd *cmd); 70static int transport_put_cmd(struct se_cmd *cmd);
70static void target_complete_ok_work(struct work_struct *work); 71static void target_complete_ok_work(struct work_struct *work);
71 72
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
804 805
805 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 806 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
806 transport_write_pending_qf(cmd); 807 transport_write_pending_qf(cmd);
807 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 808 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
809 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
808 transport_complete_qf(cmd); 810 transport_complete_qf(cmd);
809 } 811 }
810} 812}
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1719 } 1721 }
1720 trace_target_cmd_complete(cmd); 1722 trace_target_cmd_complete(cmd);
1721 ret = cmd->se_tfo->queue_status(cmd); 1723 ret = cmd->se_tfo->queue_status(cmd);
1722 if (ret == -EAGAIN || ret == -ENOMEM) 1724 if (ret)
1723 goto queue_full; 1725 goto queue_full;
1724 goto check_stop; 1726 goto check_stop;
1725 default: 1727 default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1730 } 1732 }
1731 1733
1732 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1734 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1733 if (ret == -EAGAIN || ret == -ENOMEM) 1735 if (ret)
1734 goto queue_full; 1736 goto queue_full;
1735 1737
1736check_stop: 1738check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
1739 return; 1741 return;
1740 1742
1741queue_full: 1743queue_full:
1742 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1744 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1743 transport_handle_queue_full(cmd, cmd->se_dev);
1744} 1745}
1745EXPORT_SYMBOL(transport_generic_request_failure); 1746EXPORT_SYMBOL(transport_generic_request_failure);
1746 1747
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
1977 int ret = 0; 1978 int ret = 0;
1978 1979
1979 transport_complete_task_attr(cmd); 1980 transport_complete_task_attr(cmd);
1981 /*
1982 * If a fabric driver ->write_pending() or ->queue_data_in() callback
1983 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
1984 * the same callbacks should not be retried. Return CHECK_CONDITION
1985 * if a scsi_status is not already set.
1986 *
1987 * If a fabric driver ->queue_status() has returned non zero, always
1988 * keep retrying no matter what..
1989 */
1990 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
1991 if (cmd->scsi_status)
1992 goto queue_status;
1980 1993
1981 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1994 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
1982 trace_target_cmd_complete(cmd); 1995 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1983 ret = cmd->se_tfo->queue_status(cmd); 1996 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
1984 goto out; 1997 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
1998 goto queue_status;
1985 } 1999 }
1986 2000
2001 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2002 goto queue_status;
2003
1987 switch (cmd->data_direction) { 2004 switch (cmd->data_direction) {
1988 case DMA_FROM_DEVICE: 2005 case DMA_FROM_DEVICE:
1989 if (cmd->scsi_status) 2006 if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
2007 break; 2024 break;
2008 } 2025 }
2009 2026
2010out:
2011 if (ret < 0) { 2027 if (ret < 0) {
2012 transport_handle_queue_full(cmd, cmd->se_dev); 2028 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2013 return; 2029 return;
2014 } 2030 }
2015 transport_lun_remove_cmd(cmd); 2031 transport_lun_remove_cmd(cmd);
2016 transport_cmd_check_stop_to_fabric(cmd); 2032 transport_cmd_check_stop_to_fabric(cmd);
2017} 2033}
2018 2034
2019static void transport_handle_queue_full( 2035static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2020 struct se_cmd *cmd, 2036 int err, bool write_pending)
2021 struct se_device *dev)
2022{ 2037{
2038 /*
2039 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2040 * ->queue_data_in() callbacks from new process context.
2041 *
2042 * Otherwise for other errors, transport_complete_qf() will send
2043 * CHECK_CONDITION via ->queue_status() instead of attempting to
2044 * retry associated fabric driver data-transfer callbacks.
2045 */
2046 if (err == -EAGAIN || err == -ENOMEM) {
2047 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2048 TRANSPORT_COMPLETE_QF_OK;
2049 } else {
2050 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2051 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2052 }
2053
2023 spin_lock_irq(&dev->qf_cmd_lock); 2054 spin_lock_irq(&dev->qf_cmd_lock);
2024 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2055 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2025 atomic_inc_mb(&dev->dev_qf_count); 2056 atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
2083 WARN_ON(!cmd->scsi_status); 2114 WARN_ON(!cmd->scsi_status);
2084 ret = transport_send_check_condition_and_sense( 2115 ret = transport_send_check_condition_and_sense(
2085 cmd, 0, 1); 2116 cmd, 0, 1);
2086 if (ret == -EAGAIN || ret == -ENOMEM) 2117 if (ret)
2087 goto queue_full; 2118 goto queue_full;
2088 2119
2089 transport_lun_remove_cmd(cmd); 2120 transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
2109 } else if (rc) { 2140 } else if (rc) {
2110 ret = transport_send_check_condition_and_sense(cmd, 2141 ret = transport_send_check_condition_and_sense(cmd,
2111 rc, 0); 2142 rc, 0);
2112 if (ret == -EAGAIN || ret == -ENOMEM) 2143 if (ret)
2113 goto queue_full; 2144 goto queue_full;
2114 2145
2115 transport_lun_remove_cmd(cmd); 2146 transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
2134 if (target_read_prot_action(cmd)) { 2165 if (target_read_prot_action(cmd)) {
2135 ret = transport_send_check_condition_and_sense(cmd, 2166 ret = transport_send_check_condition_and_sense(cmd,
2136 cmd->pi_err, 0); 2167 cmd->pi_err, 0);
2137 if (ret == -EAGAIN || ret == -ENOMEM) 2168 if (ret)
2138 goto queue_full; 2169 goto queue_full;
2139 2170
2140 transport_lun_remove_cmd(cmd); 2171 transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
2144 2175
2145 trace_target_cmd_complete(cmd); 2176 trace_target_cmd_complete(cmd);
2146 ret = cmd->se_tfo->queue_data_in(cmd); 2177 ret = cmd->se_tfo->queue_data_in(cmd);
2147 if (ret == -EAGAIN || ret == -ENOMEM) 2178 if (ret)
2148 goto queue_full; 2179 goto queue_full;
2149 break; 2180 break;
2150 case DMA_TO_DEVICE: 2181 case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
2157 atomic_long_add(cmd->data_length, 2188 atomic_long_add(cmd->data_length,
2158 &cmd->se_lun->lun_stats.tx_data_octets); 2189 &cmd->se_lun->lun_stats.tx_data_octets);
2159 ret = cmd->se_tfo->queue_data_in(cmd); 2190 ret = cmd->se_tfo->queue_data_in(cmd);
2160 if (ret == -EAGAIN || ret == -ENOMEM) 2191 if (ret)
2161 goto queue_full; 2192 goto queue_full;
2162 break; 2193 break;
2163 } 2194 }
@@ -2166,7 +2197,7 @@ queue_rsp:
2166queue_status: 2197queue_status:
2167 trace_target_cmd_complete(cmd); 2198 trace_target_cmd_complete(cmd);
2168 ret = cmd->se_tfo->queue_status(cmd); 2199 ret = cmd->se_tfo->queue_status(cmd);
2169 if (ret == -EAGAIN || ret == -ENOMEM) 2200 if (ret)
2170 goto queue_full; 2201 goto queue_full;
2171 break; 2202 break;
2172 default: 2203 default:
@@ -2180,8 +2211,8 @@ queue_status:
2180queue_full: 2211queue_full:
2181 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2212 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2182 " data_direction: %d\n", cmd, cmd->data_direction); 2213 " data_direction: %d\n", cmd, cmd->data_direction);
2183 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2214
2184 transport_handle_queue_full(cmd, cmd->se_dev); 2215 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2185} 2216}
2186 2217
2187void target_free_sgl(struct scatterlist *sgl, int nents) 2218void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2449 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2480 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2450 2481
2451 ret = cmd->se_tfo->write_pending(cmd); 2482 ret = cmd->se_tfo->write_pending(cmd);
2452 if (ret == -EAGAIN || ret == -ENOMEM) 2483 if (ret)
2453 goto queue_full; 2484 goto queue_full;
2454 2485
2455 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2486 return 0;
2456 WARN_ON(ret);
2457
2458 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2459 2487
2460queue_full: 2488queue_full:
2461 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2489 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2462 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2490 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2463 transport_handle_queue_full(cmd, cmd->se_dev);
2464 return 0; 2491 return 0;
2465} 2492}
2466EXPORT_SYMBOL(transport_generic_new_cmd); 2493EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2470 int ret; 2497 int ret;
2471 2498
2472 ret = cmd->se_tfo->write_pending(cmd); 2499 ret = cmd->se_tfo->write_pending(cmd);
2473 if (ret == -EAGAIN || ret == -ENOMEM) { 2500 if (ret) {
2474 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2501 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2475 cmd); 2502 cmd);
2476 transport_handle_queue_full(cmd, cmd->se_dev); 2503 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2477 } 2504 }
2478} 2505}
2479 2506
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3011 __releases(&cmd->t_state_lock) 3038 __releases(&cmd->t_state_lock)
3012 __acquires(&cmd->t_state_lock) 3039 __acquires(&cmd->t_state_lock)
3013{ 3040{
3041 int ret;
3042
3014 assert_spin_locked(&cmd->t_state_lock); 3043 assert_spin_locked(&cmd->t_state_lock);
3015 WARN_ON_ONCE(!irqs_disabled()); 3044 WARN_ON_ONCE(!irqs_disabled());
3016 3045
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3034 trace_target_cmd_complete(cmd); 3063 trace_target_cmd_complete(cmd);
3035 3064
3036 spin_unlock_irq(&cmd->t_state_lock); 3065 spin_unlock_irq(&cmd->t_state_lock);
3037 cmd->se_tfo->queue_status(cmd); 3066 ret = cmd->se_tfo->queue_status(cmd);
3067 if (ret)
3068 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3038 spin_lock_irq(&cmd->t_state_lock); 3069 spin_lock_irq(&cmd->t_state_lock);
3039 3070
3040 return 1; 3071 return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
3055void transport_send_task_abort(struct se_cmd *cmd) 3086void transport_send_task_abort(struct se_cmd *cmd)
3056{ 3087{
3057 unsigned long flags; 3088 unsigned long flags;
3089 int ret;
3058 3090
3059 spin_lock_irqsave(&cmd->t_state_lock, flags); 3091 spin_lock_irqsave(&cmd->t_state_lock, flags);
3060 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 3092 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
3090 cmd->t_task_cdb[0], cmd->tag); 3122 cmd->t_task_cdb[0], cmd->tag);
3091 3123
3092 trace_target_cmd_complete(cmd); 3124 trace_target_cmd_complete(cmd);
3093 cmd->se_tfo->queue_status(cmd); 3125 ret = cmd->se_tfo->queue_status(cmd);
3126 if (ret)
3127 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3094} 3128}
3095 3129
3096static void target_tmr_work(struct work_struct *work) 3130static void target_tmr_work(struct work_struct *work)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6874c38a10b..f615c3bbb73e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
311 DATA_BLOCK_BITS); 311 DATA_BLOCK_BITS);
312} 312}
313 313
314static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap, 314static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
315 struct scatterlist *data_sg, unsigned int data_nents) 315 bool bidi)
316{ 316{
317 struct se_cmd *se_cmd = cmd->se_cmd;
317 int i, block; 318 int i, block;
318 int block_remaining = 0; 319 int block_remaining = 0;
319 void *from, *to; 320 void *from, *to;
320 size_t copy_bytes, from_offset; 321 size_t copy_bytes, from_offset;
321 struct scatterlist *sg; 322 struct scatterlist *sg, *data_sg;
323 unsigned int data_nents;
324 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
325
326 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
327
328 if (!bidi) {
329 data_sg = se_cmd->t_data_sg;
330 data_nents = se_cmd->t_data_nents;
331 } else {
332 uint32_t count;
333
334 /*
335 * For bidi case, the first count blocks are for Data-Out
336 * buffer blocks, and before gathering the Data-In buffer
337 * the Data-Out buffer blocks should be discarded.
338 */
339 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
340 while (count--) {
341 block = find_first_bit(bitmap, DATA_BLOCK_BITS);
342 clear_bit(block, bitmap);
343 }
344
345 data_sg = se_cmd->t_bidi_data_sg;
346 data_nents = se_cmd->t_bidi_data_nents;
347 }
322 348
323 for_each_sg(data_sg, sg, data_nents, i) { 349 for_each_sg(data_sg, sg, data_nents, i) {
324 int sg_remaining = sg->length; 350 int sg_remaining = sg->length;
325 to = kmap_atomic(sg_page(sg)) + sg->offset; 351 to = kmap_atomic(sg_page(sg)) + sg->offset;
326 while (sg_remaining > 0) { 352 while (sg_remaining > 0) {
327 if (block_remaining == 0) { 353 if (block_remaining == 0) {
328 block = find_first_bit(cmd_bitmap, 354 block = find_first_bit(bitmap,
329 DATA_BLOCK_BITS); 355 DATA_BLOCK_BITS);
330 block_remaining = DATA_BLOCK_SIZE; 356 block_remaining = DATA_BLOCK_SIZE;
331 clear_bit(block, cmd_bitmap); 357 clear_bit(block, bitmap);
332 } 358 }
333 copy_bytes = min_t(size_t, sg_remaining, 359 copy_bytes = min_t(size_t, sg_remaining,
334 block_remaining); 360 block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
394 return true; 420 return true;
395} 421}
396 422
423static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
424{
425 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
426 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
427
428 if (se_cmd->se_cmd_flags & SCF_BIDI) {
429 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
430 data_length += round_up(se_cmd->t_bidi_data_sg->length,
431 DATA_BLOCK_SIZE);
432 }
433
434 return data_length;
435}
436
437static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
438{
439 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
440
441 return data_length / DATA_BLOCK_SIZE;
442}
443
397static sense_reason_t 444static sense_reason_t
398tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 445tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
399{ 446{
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
407 uint32_t cmd_head; 454 uint32_t cmd_head;
408 uint64_t cdb_off; 455 uint64_t cdb_off;
409 bool copy_to_data_area; 456 bool copy_to_data_area;
410 size_t data_length; 457 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
411 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); 458 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
412 459
413 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 460 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
421 * expensive to tell how many regions are freed in the bitmap 468 * expensive to tell how many regions are freed in the bitmap
422 */ 469 */
423 base_command_size = max(offsetof(struct tcmu_cmd_entry, 470 base_command_size = max(offsetof(struct tcmu_cmd_entry,
424 req.iov[se_cmd->t_bidi_data_nents + 471 req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
425 se_cmd->t_data_nents]),
426 sizeof(struct tcmu_cmd_entry)); 472 sizeof(struct tcmu_cmd_entry));
427 command_size = base_command_size 473 command_size = base_command_size
428 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 474 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
433 479
434 mb = udev->mb_addr; 480 mb = udev->mb_addr;
435 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 481 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
436 data_length = se_cmd->data_length;
437 if (se_cmd->se_cmd_flags & SCF_BIDI) {
438 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
439 data_length += se_cmd->t_bidi_data_sg->length;
440 }
441 if ((command_size > (udev->cmdr_size / 2)) || 482 if ((command_size > (udev->cmdr_size / 2)) ||
442 data_length > udev->data_size) { 483 data_length > udev->data_size) {
443 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 484 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
511 entry->req.iov_dif_cnt = 0; 552 entry->req.iov_dif_cnt = 0;
512 553
513 /* Handle BIDI commands */ 554 /* Handle BIDI commands */
514 iov_cnt = 0; 555 if (se_cmd->se_cmd_flags & SCF_BIDI) {
515 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, 556 iov_cnt = 0;
516 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 557 iov++;
517 entry->req.iov_bidi_cnt = iov_cnt; 558 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
518 559 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
560 false);
561 entry->req.iov_bidi_cnt = iov_cnt;
562 }
519 /* cmd's data_bitmap is what changed in process */ 563 /* cmd's data_bitmap is what changed in process */
520 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap, 564 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
521 DATA_BLOCK_BITS); 565 DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
592 se_cmd->scsi_sense_length); 636 se_cmd->scsi_sense_length);
593 free_data_area(udev, cmd); 637 free_data_area(udev, cmd);
594 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 638 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
595 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
596
597 /* Get Data-In buffer before clean up */ 639 /* Get Data-In buffer before clean up */
598 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); 640 gather_data_area(udev, cmd, true);
599 gather_data_area(udev, bitmap,
600 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
601 free_data_area(udev, cmd); 641 free_data_area(udev, cmd);
602 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 642 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
603 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); 643 gather_data_area(udev, cmd, false);
604
605 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
606 gather_data_area(udev, bitmap,
607 se_cmd->t_data_sg, se_cmd->t_data_nents);
608 free_data_area(udev, cmd); 644 free_data_area(udev, cmd);
609 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 645 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
610 free_data_area(udev, cmd); 646 free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
1196 if (ret < 0) 1232 if (ret < 0)
1197 return ret; 1233 return ret;
1198 1234
1199 if (!val) {
1200 pr_err("Illegal value for cmd_time_out\n");
1201 return -EINVAL;
1202 }
1203
1204 udev->cmd_time_out = val * MSEC_PER_SEC; 1235 udev->cmd_time_out = val * MSEC_PER_SEC;
1205 return count; 1236 return count;
1206} 1237}
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 91048eeca28b..69d0f430b2d1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -107,8 +107,6 @@ struct cpufreq_cooling_device {
107}; 107};
108static DEFINE_IDA(cpufreq_ida); 108static DEFINE_IDA(cpufreq_ida);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock); 110static DEFINE_MUTEX(cooling_list_lock);
113static LIST_HEAD(cpufreq_dev_list); 111static LIST_HEAD(cpufreq_dev_list);
114 112
@@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
395 393
396 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 394 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
397 true); 395 true);
396 if (IS_ERR(opp)) {
397 dev_warn_ratelimited(cpufreq_device->cpu_dev,
398 "Failed to find OPP for frequency %lu: %ld\n",
399 freq_hz, PTR_ERR(opp));
400 return -EINVAL;
401 }
402
398 voltage = dev_pm_opp_get_voltage(opp); 403 voltage = dev_pm_opp_get_voltage(opp);
399 dev_pm_opp_put(opp); 404 dev_pm_opp_put(opp);
400 405
401 if (voltage == 0) { 406 if (voltage == 0) {
402 dev_warn_ratelimited(cpufreq_device->cpu_dev, 407 dev_err_ratelimited(cpufreq_device->cpu_dev,
403 "Failed to get voltage for frequency %lu: %ld\n", 408 "Failed to get voltage for frequency %lu\n",
404 freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 409 freq_hz);
405 return -EINVAL; 410 return -EINVAL;
406 } 411 }
407 412
@@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
693 698
694 *state = cpufreq_cooling_get_level(cpu, target_freq); 699 *state = cpufreq_cooling_get_level(cpu, target_freq);
695 if (*state == THERMAL_CSTATE_INVALID) { 700 if (*state == THERMAL_CSTATE_INVALID) {
696 dev_warn_ratelimited(&cdev->device, 701 dev_err_ratelimited(&cdev->device,
697 "Failed to convert %dKHz for cpu %d into a cdev state\n", 702 "Failed to convert %dKHz for cpu %d into a cdev state\n",
698 target_freq, cpu); 703 target_freq, cpu);
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 706
@@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np,
771 unsigned int freq, i, num_cpus; 776 unsigned int freq, i, num_cpus;
772 int ret; 777 int ret;
773 struct thermal_cooling_device_ops *cooling_ops; 778 struct thermal_cooling_device_ops *cooling_ops;
779 bool first;
774 780
775 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) 781 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
776 return ERR_PTR(-ENOMEM); 782 return ERR_PTR(-ENOMEM);
@@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np,
874 cpufreq_dev->cool_dev = cool_dev; 880 cpufreq_dev->cool_dev = cool_dev;
875 881
876 mutex_lock(&cooling_list_lock); 882 mutex_lock(&cooling_list_lock);
883 /* Register the notifier for first cpufreq cooling device */
884 first = list_empty(&cpufreq_dev_list);
877 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 885 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
886 mutex_unlock(&cooling_list_lock);
878 887
879 /* Register the notifier for first cpufreq cooling device */ 888 if (first)
880 if (!cpufreq_dev_count++)
881 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 889 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
882 CPUFREQ_POLICY_NOTIFIER); 890 CPUFREQ_POLICY_NOTIFIER);
883 mutex_unlock(&cooling_list_lock);
884 891
885 goto put_policy; 892 goto put_policy;
886 893
@@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
1021void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 1028void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1022{ 1029{
1023 struct cpufreq_cooling_device *cpufreq_dev; 1030 struct cpufreq_cooling_device *cpufreq_dev;
1031 bool last;
1024 1032
1025 if (!cdev) 1033 if (!cdev)
1026 return; 1034 return;
@@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1028 cpufreq_dev = cdev->devdata; 1036 cpufreq_dev = cdev->devdata;
1029 1037
1030 mutex_lock(&cooling_list_lock); 1038 mutex_lock(&cooling_list_lock);
1039 list_del(&cpufreq_dev->node);
1031 /* Unregister the notifier for the last cpufreq cooling device */ 1040 /* Unregister the notifier for the last cpufreq cooling device */
1032 if (!--cpufreq_dev_count) 1041 last = list_empty(&cpufreq_dev_list);
1042 mutex_unlock(&cooling_list_lock);
1043
1044 if (last)
1033 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1045 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1034 CPUFREQ_POLICY_NOTIFIER); 1046 CPUFREQ_POLICY_NOTIFIER);
1035 1047
1036 list_del(&cpufreq_dev->node);
1037 mutex_unlock(&cooling_list_lock);
1038
1039 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1048 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1040 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); 1049 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
1041 kfree(cpufreq_dev->dyn_power_table); 1050 kfree(cpufreq_dev->dyn_power_table);
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 7743a78d4723..4bf4ad58cffd 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
186 return 0; 186 return 0;
187 187
188 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 188 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
189 if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) 189 if (PTR_ERR(opp) == -ERANGE)
190 opp = dev_pm_opp_find_freq_exact(dev, freq, false); 190 opp = dev_pm_opp_find_freq_exact(dev, freq, false);
191 191
192 if (IS_ERR(opp)) {
193 dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
194 freq, PTR_ERR(opp));
195 return 0;
196 }
197
192 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ 198 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
193 dev_pm_opp_put(opp); 199 dev_pm_opp_put(opp);
194 200
195 if (voltage == 0) { 201 if (voltage == 0) {
196 dev_warn_ratelimited(dev, 202 dev_err_ratelimited(dev,
197 "Failed to get voltage for frequency %lu: %ld\n", 203 "Failed to get voltage for frequency %lu\n",
198 freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); 204 freq);
199 return 0; 205 return 0;
200 } 206 }
201 207
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ee55a2d47bb..e65808c482f1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
257{ 257{
258 unsigned int baud = tty_termios_baud_rate(termios); 258 unsigned int baud = tty_termios_baud_rate(termios);
259 struct dw8250_data *d = p->private_data; 259 struct dw8250_data *d = p->private_data;
260 unsigned int rate; 260 long rate;
261 int ret; 261 int ret;
262 262
263 if (IS_ERR(d->clk) || !old) 263 if (IS_ERR(d->clk) || !old)
@@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
265 265
266 clk_disable_unprepare(d->clk); 266 clk_disable_unprepare(d->clk);
267 rate = clk_round_rate(d->clk, baud * 16); 267 rate = clk_round_rate(d->clk, baud * 16);
268 ret = clk_set_rate(d->clk, rate); 268 if (rate < 0)
269 ret = rate;
270 else if (rate == 0)
271 ret = -ENOENT;
272 else
273 ret = clk_set_rate(d->clk, rate);
269 clk_prepare_enable(d->clk); 274 clk_prepare_enable(d->clk);
270 275
271 if (!ret) 276 if (!ret)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a65fb8197aec..0e3f529d50e9 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -128,9 +128,13 @@ config SERIAL_8250_PCI
128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
129 129
130config SERIAL_8250_EXAR 130config SERIAL_8250_EXAR
131 tristate "8250/16550 PCI device support" 131 tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
132 depends on SERIAL_8250_PCI 132 depends on SERIAL_8250_PCI
133 default SERIAL_8250 133 default SERIAL_8250
134 help
135 This builds support for XR17C1xx, XR17V3xx and some Commtech
136 422x PCIe serial cards that are not covered by the more generic
137 SERIAL_8250_PCI option.
134 138
135config SERIAL_8250_HP300 139config SERIAL_8250_HP300
136 tristate 140 tristate
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8789ea423ccf..b0a377725d63 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
2373 if (strcmp(name, "qdf2400_e44") == 0) { 2373 if (strcmp(name, "qdf2400_e44") == 0) {
2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44");
2375 qdf2400_e44_present = true; 2375 qdf2400_e44_present = true;
2376 } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { 2376 } else if (strcmp(name, "pl011") != 0) {
2377 return -ENODEV; 2377 return -ENODEV;
2378 } 2378 }
2379 2379
@@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
2452 uart_console_write(&dev->port, s, n, pl011_putc); 2452 uart_console_write(&dev->port, s, n, pl011_putc);
2453} 2453}
2454 2454
2455/*
2456 * On non-ACPI systems, earlycon is enabled by specifying
2457 * "earlycon=pl011,<address>" on the kernel command line.
2458 *
2459 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2460 * by specifying only "earlycon" on the command line. Because it requires
2461 * SPCR, the console starts after ACPI is parsed, which is later than a
2462 * traditional early console.
2463 *
2464 * To get the traditional early console that starts before ACPI is parsed,
2465 * specify the full "earlycon=pl011,<address>" option.
2466 */
2455static int __init pl011_early_console_setup(struct earlycon_device *device, 2467static int __init pl011_early_console_setup(struct earlycon_device *device,
2456 const char *opt) 2468 const char *opt)
2457{ 2469{
2458 if (!device->port.membase) 2470 if (!device->port.membase)
2459 return -ENODEV; 2471 return -ENODEV;
2460 2472
2461 device->con->write = qdf2400_e44_present ? 2473 /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must
2462 qdf2400_e44_early_write : pl011_early_write; 2474 * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
2475 */
2476 if (!strcmp(device->options, "qdf2400_e44"))
2477 device->con->write = qdf2400_e44_early_write;
2478 else
2479 device->con->write = pl011_early_write;
2480
2463 return 0; 2481 return 0;
2464} 2482}
2465OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2483OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2466OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2484OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2485EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
2467 2486
2468#else 2487#else
2469#define AMBA_CONSOLE NULL 2488#define AMBA_CONSOLE NULL
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dcebb28ffbc4..1f50a83ef958 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port)
1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1952 atmel_port->pdc_tx.ofs = 0; 1952 atmel_port->pdc_tx.ofs = 0;
1953 } 1953 }
1954 /*
1955 * in uart_flush_buffer(), the xmit circular buffer has just
1956 * been cleared, so we have to reset tx_len accordingly.
1957 */
1958 atmel_port->tx_len = 0;
1954} 1959}
1955 1960
1956/* 1961/*
@@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
2483 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2488 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2484 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2489 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2485 2490
2491 /* Make sure that tx path is actually able to send characters */
2492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2493
2486 uart_console_write(port, s, count, atmel_console_putchar); 2494 uart_console_write(port, s, count, atmel_console_putchar);
2487 2495
2488 /* 2496 /*
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6989b227d134..be94246b6fcc 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u,
1088 AUART_LINECTRL_BAUD_DIV_MAX); 1088 AUART_LINECTRL_BAUD_DIV_MAX);
1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; 1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); 1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
1091 div = u->uartclk * 32 / baud; 1091 div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
1092 } 1092 }
1093 1093
1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); 1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index bcf1d33e6ffe..c334bcc59c64 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
575 pinctrl_select_state(ascport->pinctrl, 575 pinctrl_select_state(ascport->pinctrl,
576 ascport->states[NO_HW_FLOWCTRL]); 576 ascport->states[NO_HW_FLOWCTRL]);
577 577
578 gpiod = devm_get_gpiod_from_child(port->dev, "rts", 578 gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
579 &np->fwnode); 579 "rts",
580 if (!IS_ERR(gpiod)) { 580 &np->fwnode,
581 gpiod_direction_output(gpiod, 0); 581 GPIOD_OUT_LOW,
582 np->name);
583 if (!IS_ERR(gpiod))
582 ascport->rts = gpiod; 584 ascport->rts = gpiod;
583 }
584 } 585 }
585 } 586 }
586 587
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6de5ad..e4603b09863a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
271 271
272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
273{ 273{
274 struct tty_ldisc *ld;
275
274 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); 276 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
275 if (!tty->ldisc) 277 ld = tty->ldisc;
278 if (!ld)
276 ldsem_up_read(&tty->ldisc_sem); 279 ldsem_up_read(&tty->ldisc_sem);
277 return tty->ldisc; 280 return ld;
278} 281}
279EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); 282EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
280 283
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c5f0fc906136..8af8d9542663 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -28,7 +28,6 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
30#include <linux/sched/debug.h> 30#include <linux/sched/debug.h>
31#include <linux/sched/debug.h>
32#include <linux/tty.h> 31#include <linux/tty.h>
33#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index f03692ec5520..8fb309a0ff6b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf,
1381 1381
1382 dev_dbg(&intf->dev, "%s called\n", __func__); 1382 dev_dbg(&intf->dev, "%s called\n", __func__);
1383 1383
1384 data = kmalloc(sizeof(*data), GFP_KERNEL); 1384 data = kzalloc(sizeof(*data), GFP_KERNEL);
1385 if (!data) 1385 if (!data)
1386 return -ENOMEM; 1386 return -ENOMEM;
1387 1387
@@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf,
1444 break; 1444 break;
1445 } 1445 }
1446 } 1446 }
1447
1448 if (!data->bulk_out || !data->bulk_in) {
1449 dev_err(&intf->dev, "bulk endpoints not found\n");
1450 retcode = -ENODEV;
1451 goto err_put;
1452 }
1453
1447 /* Find int endpoint */ 1454 /* Find int endpoint */
1448 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { 1455 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
1449 endpoint = &iface_desc->endpoint[n].desc; 1456 endpoint = &iface_desc->endpoint[n].desc;
@@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1469 if (data->iin_ep_present) { 1476 if (data->iin_ep_present) {
1470 /* allocate int urb */ 1477 /* allocate int urb */
1471 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); 1478 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
1472 if (!data->iin_urb) 1479 if (!data->iin_urb) {
1480 retcode = -ENOMEM;
1473 goto error_register; 1481 goto error_register;
1482 }
1474 1483
1475 /* Protect interrupt in endpoint data until iin_urb is freed */ 1484 /* Protect interrupt in endpoint data until iin_urb is freed */
1476 kref_get(&data->kref); 1485 kref_get(&data->kref);
@@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1478 /* allocate buffer for interrupt in */ 1487 /* allocate buffer for interrupt in */
1479 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, 1488 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
1480 GFP_KERNEL); 1489 GFP_KERNEL);
1481 if (!data->iin_buffer) 1490 if (!data->iin_buffer) {
1491 retcode = -ENOMEM;
1482 goto error_register; 1492 goto error_register;
1493 }
1483 1494
1484 /* fill interrupt urb */ 1495 /* fill interrupt urb */
1485 usb_fill_int_urb(data->iin_urb, data->usb_dev, 1496 usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1512,6 +1523,7 @@ error_register:
1512 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); 1523 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
1513 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); 1524 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
1514 usbtmc_free_int(data); 1525 usbtmc_free_int(data);
1526err_put:
1515 kref_put(&data->kref, usbtmc_delete); 1527 kref_put(&data->kref, usbtmc_delete);
1516 return retcode; 1528 return retcode;
1517} 1529}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25dbd8c7aec7..4be52c602e9b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
280 280
281 /* 281 /*
282 * Adjust bInterval for quirked devices. 282 * Adjust bInterval for quirked devices.
283 */
284 /*
285 * This quirk fixes bIntervals reported in ms.
286 */
287 if (to_usb_device(ddev)->quirks &
288 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
289 n = clamp(fls(d->bInterval) + 3, i, j);
290 i = j = n;
291 }
292 /*
283 * This quirk fixes bIntervals reported in 293 * This quirk fixes bIntervals reported in
284 * linear microframes. 294 * linear microframes.
285 */ 295 */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 612fab6e54fb..79bdca5cb9c7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
520 */ 520 */
521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); 521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
522 tbuf = kzalloc(tbuf_size, GFP_KERNEL); 522 tbuf = kzalloc(tbuf_size, GFP_KERNEL);
523 if (!tbuf) 523 if (!tbuf) {
524 return -ENOMEM; 524 status = -ENOMEM;
525 goto err_alloc;
526 }
525 527
526 bufp = tbuf; 528 bufp = tbuf;
527 529
@@ -734,6 +736,7 @@ error:
734 } 736 }
735 737
736 kfree(tbuf); 738 kfree(tbuf);
739 err_alloc:
737 740
738 /* any errors get returned through the urb completion */ 741 /* any errors get returned through the urb completion */
739 spin_lock_irq(&hcd_root_hub_lock); 742 spin_lock_irq(&hcd_root_hub_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f0dd08198d74..5286bf67869a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); 4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; 4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
4277 4277
4278 if (!udev->usb2_hw_lpm_capable) 4278 if (!udev->usb2_hw_lpm_capable || !udev->bos)
4279 return; 4279 return;
4280 4280
4281 if (hub) 4281 if (hub)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98968a5..96b21b0dac1e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
170 /* M-Systems Flash Disk Pioneers */ 170 /* M-Systems Flash Disk Pioneers */
171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
172 172
173 /* Baum Vario Ultra */
174 { USB_DEVICE(0x0904, 0x6101), .driver_info =
175 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
176 { USB_DEVICE(0x0904, 0x6102), .driver_info =
177 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
178 { USB_DEVICE(0x0904, 0x6103), .driver_info =
179 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
180
173 /* Keytouch QWERTY Panel keyboard */ 181 /* Keytouch QWERTY Panel keyboard */
174 { USB_DEVICE(0x0926, 0x3333), .driver_info = 182 { USB_DEVICE(0x0926, 0x3333), .driver_info =
175 USB_QUIRK_CONFIG_INTF_STRINGS }, 183 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0d75158e43fe..79e7a3480d51 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
171 int status) 171 int status)
172{ 172{
173 struct dwc3 *dwc = dep->dwc; 173 struct dwc3 *dwc = dep->dwc;
174 unsigned int unmap_after_complete = false;
174 175
175 req->started = false; 176 req->started = false;
176 list_del(&req->list); 177 list_del(&req->list);
@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
180 if (req->request.status == -EINPROGRESS) 181 if (req->request.status == -EINPROGRESS)
181 req->request.status = status; 182 req->request.status = status;
182 183
183 if (dwc->ep0_bounced && dep->number <= 1) 184 /*
185 * NOTICE we don't want to unmap before calling ->complete() if we're
186 * dealing with a bounced ep0 request. If we unmap it here, we would end
187 * up overwritting the contents of req->buf and this could confuse the
188 * gadget driver.
189 */
190 if (dwc->ep0_bounced && dep->number <= 1) {
184 dwc->ep0_bounced = false; 191 dwc->ep0_bounced = false;
185 192 unmap_after_complete = true;
186 usb_gadget_unmap_request_by_dev(dwc->sysdev, 193 } else {
187 &req->request, req->direction); 194 usb_gadget_unmap_request_by_dev(dwc->sysdev,
195 &req->request, req->direction);
196 }
188 197
189 trace_dwc3_gadget_giveback(req); 198 trace_dwc3_gadget_giveback(req);
190 199
@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
192 usb_gadget_giveback_request(&dep->endpoint, &req->request); 201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
193 spin_lock(&dwc->lock); 202 spin_lock(&dwc->lock);
194 203
204 if (unmap_after_complete)
205 usb_gadget_unmap_request_by_dev(dwc->sysdev,
206 &req->request, req->direction);
207
195 if (dep->number > 1) 208 if (dep->number > 1)
196 pm_runtime_put(dwc->dev); 209 pm_runtime_put(dwc->dev);
197} 210}
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766ca4226..5e3828d9dac7 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
535{ 535{
536 struct usb_composite_dev *cdev = acm->port.func.config->cdev; 536 struct usb_composite_dev *cdev = acm->port.func.config->cdev;
537 int status; 537 int status;
538 __le16 serial_state;
538 539
539 spin_lock(&acm->lock); 540 spin_lock(&acm->lock);
540 if (acm->notify_req) { 541 if (acm->notify_req) {
541 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", 542 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
542 acm->port_num, acm->serial_state); 543 acm->port_num, acm->serial_state);
544 serial_state = cpu_to_le16(acm->serial_state);
543 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, 545 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
544 0, &acm->serial_state, sizeof(acm->serial_state)); 546 0, &serial_state, sizeof(acm->serial_state));
545 } else { 547 } else {
546 acm->pending = true; 548 acm->pending = true;
547 status = 0; 549 status = 0;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 89b48bcc377a..5eea44823ca0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -367,7 +367,7 @@ try_again:
367 count = min_t(unsigned, count, hidg->report_length); 367 count = min_t(unsigned, count, hidg->report_length);
368 368
369 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 369 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
370 status = copy_from_user(hidg->req->buf, buffer, count); 370 status = copy_from_user(req->buf, buffer, count);
371 371
372 if (status != 0) { 372 if (status != 0) {
373 ERROR(hidg->func.config->cdev, 373 ERROR(hidg->func.config->cdev,
@@ -378,9 +378,9 @@ try_again:
378 378
379 spin_lock_irqsave(&hidg->write_spinlock, flags); 379 spin_lock_irqsave(&hidg->write_spinlock, flags);
380 380
381 /* we our function has been disabled by host */ 381 /* when our function has been disabled by host */
382 if (!hidg->req) { 382 if (!hidg->req) {
383 free_ep_req(hidg->in_ep, hidg->req); 383 free_ep_req(hidg->in_ep, req);
384 /* 384 /*
385 * TODO 385 * TODO
386 * Should we fail with error here? 386 * Should we fail with error here?
@@ -394,7 +394,7 @@ try_again:
394 req->complete = f_hidg_req_complete; 394 req->complete = f_hidg_req_complete;
395 req->context = hidg; 395 req->context = hidg;
396 396
397 status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); 397 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
398 if (status < 0) { 398 if (status < 0) {
399 ERROR(hidg->func.config->cdev, 399 ERROR(hidg->func.config->cdev,
400 "usb_ep_queue error on int endpoint %zd\n", status); 400 "usb_ep_queue error on int endpoint %zd\n", status);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d2351139342f..a82e2bd5ea34 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
373 usb_ep_free_request(fu->ep_in, fu->bot_req_in); 373 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
374 usb_ep_free_request(fu->ep_out, fu->bot_req_out); 374 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
375 usb_ep_free_request(fu->ep_out, fu->cmd.req); 375 usb_ep_free_request(fu->ep_out, fu->cmd.req);
376 usb_ep_free_request(fu->ep_out, fu->bot_status.req); 376 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
377 377
378 kfree(fu->cmd.buf); 378 kfree(fu->cmd.buf);
379 379
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5dee04..f8a1881609a2 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); 594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); 595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
596 596
597 /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
598 if (opts->streaming_maxburst &&
599 (opts->streaming_maxpacket % 1024) != 0) {
600 opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
601 INFO(cdev, "overriding streaming_maxpacket to %d\n",
602 opts->streaming_maxpacket);
603 }
604
597 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the 605 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
598 * module parameters. 606 * module parameters.
599 * 607 *
@@ -625,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
625 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; 633 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
626 uvc_ss_streaming_comp.wBytesPerInterval = 634 uvc_ss_streaming_comp.wBytesPerInterval =
627 cpu_to_le16(max_packet_size * max_packet_mult * 635 cpu_to_le16(max_packet_size * max_packet_mult *
628 opts->streaming_maxburst); 636 (opts->streaming_maxburst + 1));
629 637
630 /* Allocate endpoints. */ 638 /* Allocate endpoints. */
631 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); 639 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da645c1b9..8a365aad66fe 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1523 td = phys_to_virt(addr); 1523 td = phys_to_virt(addr);
1524 addr2 = (dma_addr_t)td->next; 1524 addr2 = (dma_addr_t)td->next;
1525 pci_pool_free(dev->data_requests, td, addr); 1525 pci_pool_free(dev->data_requests, td, addr);
1526 td->next = 0x00;
1527 addr = addr2; 1526 addr = addr2;
1528 } 1527 }
1529 req->chain_len = 1; 1528 req->chain_len = 1;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index bd02a6cd8e2c..6ed468fa7d5e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -344,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
344static struct platform_driver usb_xhci_driver = { 344static struct platform_driver usb_xhci_driver = {
345 .probe = xhci_plat_probe, 345 .probe = xhci_plat_probe,
346 .remove = xhci_plat_remove, 346 .remove = xhci_plat_remove,
347 .shutdown = usb_hcd_platform_shutdown,
347 .driver = { 348 .driver = {
348 .name = "xhci-hcd", 349 .name = "xhci-hcd",
349 .pm = DEV_PM_OPS, 350 .pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d9936c771fa0..a3309aa02993 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1989 case TRB_NORMAL: 1989 case TRB_NORMAL:
1990 td->urb->actual_length = requested - remaining; 1990 td->urb->actual_length = requested - remaining;
1991 goto finish_td; 1991 goto finish_td;
1992 case TRB_STATUS:
1993 td->urb->actual_length = requested;
1994 goto finish_td;
1992 default: 1995 default:
1993 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 1996 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
1994 trb_type); 1997 trb_type);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 50aee8b7718b..953fd8f62df0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1477 struct xhci_ring *ep_ring; 1477 struct xhci_ring *ep_ring;
1478 struct xhci_virt_ep *ep; 1478 struct xhci_virt_ep *ep;
1479 struct xhci_command *command; 1479 struct xhci_command *command;
1480 struct xhci_virt_device *vdev;
1480 1481
1481 xhci = hcd_to_xhci(hcd); 1482 xhci = hcd_to_xhci(hcd);
1482 spin_lock_irqsave(&xhci->lock, flags); 1483 spin_lock_irqsave(&xhci->lock, flags);
@@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1485 1486
1486 /* Make sure the URB hasn't completed or been unlinked already */ 1487 /* Make sure the URB hasn't completed or been unlinked already */
1487 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 1488 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1488 if (ret || !urb->hcpriv) 1489 if (ret)
1489 goto done; 1490 goto done;
1491
1492 /* give back URB now if we can't queue it for cancel */
1493 vdev = xhci->devs[urb->dev->slot_id];
1494 urb_priv = urb->hcpriv;
1495 if (!vdev || !urb_priv)
1496 goto err_giveback;
1497
1498 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1499 ep = &vdev->eps[ep_index];
1500 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1501 if (!ep || !ep_ring)
1502 goto err_giveback;
1503
1490 temp = readl(&xhci->op_regs->status); 1504 temp = readl(&xhci->op_regs->status);
1491 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1505 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1492 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1506 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1493 "HW died, freeing TD."); 1507 "HW died, freeing TD.");
1494 urb_priv = urb->hcpriv;
1495 for (i = urb_priv->num_tds_done; 1508 for (i = urb_priv->num_tds_done;
1496 i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; 1509 i < urb_priv->num_tds;
1497 i++) { 1510 i++) {
1498 td = &urb_priv->td[i]; 1511 td = &urb_priv->td[i];
1499 if (!list_empty(&td->td_list)) 1512 if (!list_empty(&td->td_list))
@@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1501 if (!list_empty(&td->cancelled_td_list)) 1514 if (!list_empty(&td->cancelled_td_list))
1502 list_del_init(&td->cancelled_td_list); 1515 list_del_init(&td->cancelled_td_list);
1503 } 1516 }
1504 1517 goto err_giveback;
1505 usb_hcd_unlink_urb_from_ep(hcd, urb);
1506 spin_unlock_irqrestore(&xhci->lock, flags);
1507 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1508 xhci_urb_free_priv(urb_priv);
1509 return ret;
1510 } 1518 }
1511 1519
1512 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1513 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1514 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1515 if (!ep_ring) {
1516 ret = -EINVAL;
1517 goto done;
1518 }
1519
1520 urb_priv = urb->hcpriv;
1521 i = urb_priv->num_tds_done; 1520 i = urb_priv->num_tds_done;
1522 if (i < urb_priv->num_tds) 1521 if (i < urb_priv->num_tds)
1523 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1554done: 1553done:
1555 spin_unlock_irqrestore(&xhci->lock, flags); 1554 spin_unlock_irqrestore(&xhci->lock, flags);
1556 return ret; 1555 return ret;
1556
1557err_giveback:
1558 if (urb_priv)
1559 xhci_urb_free_priv(urb_priv);
1560 usb_hcd_unlink_urb_from_ep(hcd, urb);
1561 spin_unlock_irqrestore(&xhci->lock, flags);
1562 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1563 return ret;
1557} 1564}
1558 1565
1559/* Drop an endpoint from a new bandwidth configuration for this device. 1566/* Drop an endpoint from a new bandwidth configuration for this device.
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8b9fd7534f69..502bfe30a077 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
347 if (iface_desc->desc.bInterfaceClass != 0x0A) 347 if (iface_desc->desc.bInterfaceClass != 0x0A)
348 return -ENODEV; 348 return -ENODEV;
349 349
350 if (iface_desc->desc.bNumEndpoints < 1)
351 return -ENODEV;
352
350 /* allocate memory for our device state and initialize it */ 353 /* allocate memory for our device state and initialize it */
351 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 354 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
352 if (dev == NULL) 355 if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 77176511658f..d3d124753266 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
366 366
367 hdev = interface_to_usbdev(intf); 367 hdev = interface_to_usbdev(intf);
368 desc = intf->cur_altsetting; 368 desc = intf->cur_altsetting;
369
370 if (desc->desc.bNumEndpoints < 1)
371 return -ENODEV;
372
369 endpoint = &desc->endpoint[0].desc; 373 endpoint = &desc->endpoint[0].desc;
370 374
371 /* valid only for SS root hub */ 375 /* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index e45a3a680db8..07014cad6dbe 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf,
709 709
710 interface = intf->cur_altsetting; 710 interface = intf->cur_altsetting;
711 711
712 if (interface->desc.bNumEndpoints < 3) {
713 usb_put_dev(usbdev);
714 return -ENODEV;
715 }
716
712 /* 717 /*
713 * Allocate parport interface 718 * Allocate parport interface
714 */ 719 */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d8bae6ca8904..0c3664ab705e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev)
2490 musb_host_cleanup(musb); 2490 musb_host_cleanup(musb);
2491 musb_gadget_cleanup(musb); 2491 musb_gadget_cleanup(musb);
2492 2492
2493 spin_lock_irqsave(&musb->lock, flags);
2494 musb_platform_disable(musb); 2493 musb_platform_disable(musb);
2494 spin_lock_irqsave(&musb->lock, flags);
2495 musb_disable_interrupts(musb); 2495 musb_disable_interrupts(musb);
2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2497 spin_unlock_irqrestore(&musb->lock, flags); 2497 spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 00e272bfee39..355655f8a3fb 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data,
238 transferred < cppi41_channel->packet_sz) 238 transferred < cppi41_channel->packet_sz)
239 cppi41_channel->prog_len = 0; 239 cppi41_channel->prog_len = 0;
240 240
241 if (cppi41_channel->is_tx) 241 if (cppi41_channel->is_tx) {
242 empty = musb_is_tx_fifo_empty(hw_ep); 242 u8 type;
243
244 if (is_host_active(musb))
245 type = hw_ep->out_qh->type;
246 else
247 type = hw_ep->ep_in.type;
248
249 if (type == USB_ENDPOINT_XFER_ISOC)
250 /*
251 * Don't use the early-TX-interrupt workaround below
252 * for Isoch transfter. Since Isoch are periodic
253 * transfer, by the time the next transfer is
254 * scheduled, the current one should be done already.
255 *
256 * This avoids audio playback underrun issue.
257 */
258 empty = true;
259 else
260 empty = musb_is_tx_fifo_empty(hw_ep);
261 }
243 262
244 if (!cppi41_channel->is_tx || empty) { 263 if (!cppi41_channel->is_tx || empty) {
245 cppi41_trans_done(cppi41_channel); 264 cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7c047c4a2565..9c7ee26ef388 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev)
933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { 933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
934 ret = dsps_setup_optional_vbus_irq(pdev, glue); 934 ret = dsps_setup_optional_vbus_irq(pdev, glue);
935 if (ret) 935 if (ret)
936 return ret; 936 goto err_iounmap;
937 } 937 }
938 938
939 platform_set_drvdata(pdev, glue); 939 platform_set_drvdata(pdev, glue);
@@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev)
946 946
947err: 947err:
948 pm_runtime_disable(&pdev->dev); 948 pm_runtime_disable(&pdev->dev);
949err_iounmap:
950 iounmap(glue->usbss_base);
949 return ret; 951 return ret;
950} 952}
951 953
@@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev)
956 platform_device_unregister(glue->musb); 958 platform_device_unregister(glue->musb);
957 959
958 pm_runtime_disable(&pdev->dev); 960 pm_runtime_disable(&pdev->dev);
961 iounmap(glue->usbss_base);
959 962
960 return 0; 963 return 0;
961} 964}
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index b3b33cf7ddf6..f333024660b4 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -136,7 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
136static struct i2c_driver isp1301_driver = { 136static struct i2c_driver isp1301_driver = {
137 .driver = { 137 .driver = {
138 .name = DRV_NAME, 138 .name = DRV_NAME,
139 .of_match_table = of_match_ptr(isp1301_of_match), 139 .of_match_table = isp1301_of_match,
140 }, 140 },
141 .probe = isp1301_probe, 141 .probe = isp1301_probe,
142 .remove = isp1301_remove, 142 .remove = isp1301_remove,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e54c05..af67a0de6b5d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
233#define BANDRICH_PRODUCT_1012 0x1012 233#define BANDRICH_PRODUCT_1012 0x1012
234 234
235#define QUALCOMM_VENDOR_ID 0x05C6 235#define QUALCOMM_VENDOR_ID 0x05C6
236/* These Quectel products use Qualcomm's vendor ID */
237#define QUECTEL_PRODUCT_UC20 0x9003
238#define QUECTEL_PRODUCT_UC15 0x9090
239
240#define QUECTEL_VENDOR_ID 0x2c7c
241/* These Quectel products use Quectel's vendor ID */
242#define QUECTEL_PRODUCT_EC21 0x0121
243#define QUECTEL_PRODUCT_EC25 0x0125
236 244
237#define CMOTECH_VENDOR_ID 0x16d8 245#define CMOTECH_VENDOR_ID 0x16d8
238#define CMOTECH_PRODUCT_6001 0x6001 246#define CMOTECH_PRODUCT_6001 0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
1161 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1169 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1162 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1170 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1163 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1171 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1164 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ 1172 /* Quectel products using Qualcomm vendor ID */
1173 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1174 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1175 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1176 /* Quectel products using Quectel vendor ID */
1177 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1178 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1179 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1165 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1180 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1166 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1181 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1167 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1182 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458db7e3c..38b3f0d8cd58 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
172 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
173 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
172 174
173 /* Huawei devices */ 175 /* Huawei devices */
174 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 176 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd9218a..d01496fd27fe 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
39 int result; 39 int result;
40 struct device *dev = &iface->dev; 40 struct device *dev = &iface->dev;
41 41
42 if (iface->cur_altsetting->desc.bNumEndpoints < 3)
43 return -ENODEV;
44
42 result = wa_rpipes_create(wa); 45 result = wa_rpipes_create(wa);
43 if (result < 0) 46 if (result < 0)
44 goto error_rpipes_create; 47 goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c29d17..35a1e777b449 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface,
823 struct hwarc *hwarc; 823 struct hwarc *hwarc;
824 struct device *dev = &iface->dev; 824 struct device *dev = &iface->dev;
825 825
826 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
827 return -ENODEV;
828
826 result = -ENOMEM; 829 result = -ENOMEM;
827 uwb_rc = uwb_rc_alloc(); 830 uwb_rc = uwb_rc_alloc();
828 if (uwb_rc == NULL) { 831 if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846ac071..6345e85822a4 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
362 result); 362 result);
363 } 363 }
364 364
365 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
366 return -ENODEV;
367
365 result = -ENOMEM; 368 result = -ENOMEM;
366 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); 369 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
367 if (i1480_usb == NULL) { 370 if (i1480_usb == NULL) {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 609f4f982c74..561084ab387f 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref)
403 struct iommu_group *iommu_group = group->iommu_group; 403 struct iommu_group *iommu_group = group->iommu_group;
404 404
405 WARN_ON(!list_empty(&group->device_list)); 405 WARN_ON(!list_empty(&group->device_list));
406 WARN_ON(group->notifier.head);
406 407
407 list_for_each_entry_safe(unbound, tmp, 408 list_for_each_entry_safe(unbound, tmp,
408 &group->unbound_list, unbound_next) { 409 &group->unbound_list, unbound_next) {
@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1573 return -EBUSY; 1574 return -EBUSY;
1574 } 1575 }
1575 1576
1577 /* Warn if previous user didn't cleanup and re-init to drop them */
1578 if (WARN_ON(group->notifier.head))
1579 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
1580
1576 filep->private_data = group; 1581 filep->private_data = group;
1577 1582
1578 return 0; 1583 return 0;
@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
1584 1589
1585 filep->private_data = NULL; 1590 filep->private_data = NULL;
1586 1591
1587 /* Any user didn't unregister? */
1588 WARN_ON(group->notifier.head);
1589
1590 vfio_group_try_dissolve_container(group); 1592 vfio_group_try_dissolve_container(group);
1591 1593
1592 atomic_dec(&group->opened); 1594 atomic_dec(&group->opened);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c26fa1f3ed86..32d2633092a3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1182 return NULL; 1182 return NULL;
1183} 1183}
1184 1184
1185static bool vfio_iommu_has_resv_msi(struct iommu_group *group, 1185static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1186 phys_addr_t *base)
1187{ 1186{
1188 struct list_head group_resv_regions; 1187 struct list_head group_resv_regions;
1189 struct iommu_resv_region *region, *next; 1188 struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
1192 INIT_LIST_HEAD(&group_resv_regions); 1191 INIT_LIST_HEAD(&group_resv_regions);
1193 iommu_get_group_resv_regions(group, &group_resv_regions); 1192 iommu_get_group_resv_regions(group, &group_resv_regions);
1194 list_for_each_entry(region, &group_resv_regions, list) { 1193 list_for_each_entry(region, &group_resv_regions, list) {
1195 if (region->type & IOMMU_RESV_MSI) { 1194 if (region->type == IOMMU_RESV_SW_MSI) {
1196 *base = region->start; 1195 *base = region->start;
1197 ret = true; 1196 ret = true;
1198 goto out; 1197 goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1283 if (ret) 1282 if (ret)
1284 goto out_domain; 1283 goto out_domain;
1285 1284
1286 resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); 1285 resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1287 1286
1288 INIT_LIST_HEAD(&domain->group_list); 1287 INIT_LIST_HEAD(&domain->group_list);
1289 list_add(&group->next, &domain->group_list); 1288 list_add(&group->next, &domain->group_list);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index ce5e63d2c66a..44eed8eb0725 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
223 return len; 223 return len;
224} 224}
225 225
226static int
227vhost_transport_cancel_pkt(struct vsock_sock *vsk)
228{
229 struct vhost_vsock *vsock;
230 struct virtio_vsock_pkt *pkt, *n;
231 int cnt = 0;
232 LIST_HEAD(freeme);
233
234 /* Find the vhost_vsock according to guest context id */
235 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
236 if (!vsock)
237 return -ENODEV;
238
239 spin_lock_bh(&vsock->send_pkt_list_lock);
240 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
241 if (pkt->vsk != vsk)
242 continue;
243 list_move(&pkt->list, &freeme);
244 }
245 spin_unlock_bh(&vsock->send_pkt_list_lock);
246
247 list_for_each_entry_safe(pkt, n, &freeme, list) {
248 if (pkt->reply)
249 cnt++;
250 list_del(&pkt->list);
251 virtio_transport_free_pkt(pkt);
252 }
253
254 if (cnt) {
255 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
256 int new_cnt;
257
258 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
259 if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
260 vhost_poll_queue(&tx_vq->poll);
261 }
262
263 return 0;
264}
265
226static struct virtio_vsock_pkt * 266static struct virtio_vsock_pkt *
227vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, 267vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
228 unsigned int out, unsigned int in) 268 unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
675 .release = virtio_transport_release, 715 .release = virtio_transport_release,
676 .connect = virtio_transport_connect, 716 .connect = virtio_transport_connect,
677 .shutdown = virtio_transport_shutdown, 717 .shutdown = virtio_transport_shutdown,
718 .cancel_pkt = vhost_transport_cancel_pkt,
678 719
679 .dgram_enqueue = virtio_transport_dgram_enqueue, 720 .dgram_enqueue = virtio_transport_dgram_enqueue,
680 .dgram_dequeue = virtio_transport_dgram_dequeue, 721 .dgram_dequeue = virtio_transport_dgram_dequeue,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index d7efcb632f7d..002f1ce22bd0 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
297 } 297 }
298 298
299 /* 299 /*
300 * If the GPIO is configured as input, change the direction to output 300 * If the GPIO is not known to be already configured as output, that
301 * and set the GPIO as active. 301 * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
302 * change the direction to output and set the GPIO as active.
302 * Do not force the GPIO to active when it was already output as it 303 * Do not force the GPIO to active when it was already output as it
303 * could cause backlight flickering or we would enable the backlight too 304 * could cause backlight flickering or we would enable the backlight too
304 * early. Leave the decision of the initial backlight state for later. 305 * early. Leave the decision of the initial backlight state for later.
305 */ 306 */
306 if (pb->enable_gpio && 307 if (pb->enable_gpio &&
307 gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN) 308 gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
308 gpiod_direction_output(pb->enable_gpio, 1); 309 gpiod_direction_output(pb->enable_gpio, 1);
309 310
310 pb->power_supply = devm_regulator_get(&pdev->dev, "power"); 311 pb->power_supply = devm_regulator_get(&pdev->dev, "power");
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8c4dc1e1f94f..b827a8113e26 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/fb.h> 12#include <linux/fb.h>
13#include <linux/pci.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/screen_info.h> 15#include <linux/screen_info.h>
15#include <video/vga.h> 16#include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
143}; 144};
144ATTRIBUTE_GROUPS(efifb); 145ATTRIBUTE_GROUPS(efifb);
145 146
147static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
148
146static int efifb_probe(struct platform_device *dev) 149static int efifb_probe(struct platform_device *dev)
147{ 150{
148 struct fb_info *info; 151 struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
152 unsigned int size_total; 155 unsigned int size_total;
153 char *option = NULL; 156 char *option = NULL;
154 157
155 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 158 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
156 return -ENODEV; 159 return -ENODEV;
157 160
158 if (fb_get_options("efifb", &option)) 161 if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
360}; 363};
361 364
362builtin_platform_driver(efifb_driver); 365builtin_platform_driver(efifb_driver);
366
367#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
368
369static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
370
371static void claim_efifb_bar(struct pci_dev *dev, int idx)
372{
373 u16 word;
374
375 pci_bar_found = true;
376
377 pci_read_config_word(dev, PCI_COMMAND, &word);
378 if (!(word & PCI_COMMAND_MEMORY)) {
379 pci_dev_disabled = true;
380 dev_err(&dev->dev,
381 "BAR %d: assigned to efifb but device is disabled!\n",
382 idx);
383 return;
384 }
385
386 if (pci_claim_resource(dev, idx)) {
387 pci_dev_disabled = true;
388 dev_err(&dev->dev,
389 "BAR %d: failed to claim resource for efifb!\n", idx);
390 return;
391 }
392
393 dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
394}
395
396static void efifb_fixup_resources(struct pci_dev *dev)
397{
398 u64 base = screen_info.lfb_base;
399 u64 size = screen_info.lfb_size;
400 int i;
401
402 if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
403 return;
404
405 if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
406 base |= (u64)screen_info.ext_lfb_base << 32;
407
408 if (!base)
409 return;
410
411 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
412 struct resource *res = &dev->resource[i];
413
414 if (!(res->flags & IORESOURCE_MEM))
415 continue;
416
417 if (res->start <= base && res->end >= base + size - 1) {
418 claim_efifb_bar(dev, i);
419 break;
420 }
421 }
422}
423DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
424 16, efifb_fixup_resources);
425
426#endif
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1abba07b84b3..f4cbfb3b8a09 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1608 return 0; 1608 return 0;
1609} 1609}
1610 1610
1611static void check_required_callbacks(struct omapfb_device *fbdev)
1612{
1613#define _C(x) (fbdev->ctrl->x != NULL)
1614#define _P(x) (fbdev->panel->x != NULL)
1615 BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
1616 BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
1617 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
1618 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
1619 _P(get_caps)));
1620#undef _P
1621#undef _C
1622}
1623
1624/* 1611/*
1625 * Called by LDM binding to probe and attach a new device. 1612 * Called by LDM binding to probe and attach a new device.
1626 * Initialization sequence: 1613 * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
1705 omapfb_ops.fb_mmap = omapfb_mmap; 1692 omapfb_ops.fb_mmap = omapfb_mmap;
1706 init_state++; 1693 init_state++;
1707 1694
1708 check_required_callbacks(fbdev);
1709
1710 r = planes_init(fbdev); 1695 r = planes_init(fbdev);
1711 if (r) 1696 if (r)
1712 goto cleanup; 1697 goto cleanup;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index bd017b57c47f..f599520374dd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
578 578
579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); 579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
580 if (IS_ERR(par->vbat_reg)) { 580 if (IS_ERR(par->vbat_reg)) {
581 dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
582 PTR_ERR(par->vbat_reg));
583 ret = PTR_ERR(par->vbat_reg); 581 ret = PTR_ERR(par->vbat_reg);
584 goto fb_alloc_error; 582 if (ret == -ENODEV) {
583 par->vbat_reg = NULL;
584 } else {
585 dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
586 ret);
587 goto fb_alloc_error;
588 }
585 } 589 }
586 590
587 if (of_property_read_u32(node, "solomon,width", &par->width)) 591 if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
668 udelay(4); 672 udelay(4);
669 } 673 }
670 674
671 ret = regulator_enable(par->vbat_reg); 675 if (par->vbat_reg) {
672 if (ret) { 676 ret = regulator_enable(par->vbat_reg);
673 dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); 677 if (ret) {
674 goto reset_oled_error; 678 dev_err(&client->dev, "failed to enable VBAT: %d\n",
679 ret);
680 goto reset_oled_error;
681 }
675 } 682 }
676 683
677 ret = ssd1307fb_init(par); 684 ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
710 pwm_put(par->pwm); 717 pwm_put(par->pwm);
711 }; 718 };
712regulator_enable_error: 719regulator_enable_error:
713 regulator_disable(par->vbat_reg); 720 if (par->vbat_reg)
721 regulator_disable(par->vbat_reg);
714reset_oled_error: 722reset_oled_error:
715 fb_deferred_io_cleanup(info); 723 fb_deferred_io_cleanup(info);
716fb_alloc_error: 724fb_alloc_error:
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index d0115a7af0a9..3ee309c50b2d 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
643 break; 643 break;
644 644
645 case XenbusStateInitWait: 645 case XenbusStateInitWait:
646InitWait:
647 xenbus_switch_state(dev, XenbusStateConnected); 646 xenbus_switch_state(dev, XenbusStateConnected);
648 break; 647 break;
649 648
@@ -654,7 +653,8 @@ InitWait:
654 * get Connected twice here. 653 * get Connected twice here.
655 */ 654 */
656 if (dev->state != XenbusStateConnected) 655 if (dev->state != XenbusStateConnected)
657 goto InitWait; /* no InitWait seen yet, fudge it */ 656 /* no InitWait seen yet, fudge it */
657 xenbus_switch_state(dev, XenbusStateConnected);
658 658
659 if (xenbus_read_unsigned(info->xbdev->otherend, 659 if (xenbus_read_unsigned(info->xbdev->otherend,
660 "request-update", 0)) 660 "request-update", 0))
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 400d70b69379..48230a5e12f2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
232 if (device_features & (1ULL << i)) 232 if (device_features & (1ULL << i))
233 __virtio_set_bit(dev, i); 233 __virtio_set_bit(dev, i);
234 234
235 if (drv->validate) {
236 err = drv->validate(dev);
237 if (err)
238 goto err;
239 }
240
235 err = virtio_finalize_features(dev); 241 err = virtio_finalize_features(dev);
236 if (err) 242 if (err)
237 goto err; 243 goto err;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e1191508228..34adf9b9c053 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
242 242
243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
244 244
245static void update_balloon_stats(struct virtio_balloon *vb) 245static unsigned int update_balloon_stats(struct virtio_balloon *vb)
246{ 246{
247 unsigned long events[NR_VM_EVENT_ITEMS]; 247 unsigned long events[NR_VM_EVENT_ITEMS];
248 struct sysinfo i; 248 struct sysinfo i;
249 int idx = 0; 249 unsigned int idx = 0;
250 long available; 250 long available;
251 251
252 all_vm_events(events); 252 all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
254 254
255 available = si_mem_available(); 255 available = si_mem_available();
256 256
257#ifdef CONFIG_VM_EVENT_COUNTERS
257 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 258 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
258 pages_to_bytes(events[PSWPIN])); 259 pages_to_bytes(events[PSWPIN]));
259 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 260 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
260 pages_to_bytes(events[PSWPOUT])); 261 pages_to_bytes(events[PSWPOUT]));
261 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
264#endif
263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
264 pages_to_bytes(i.freeram)); 266 pages_to_bytes(i.freeram));
265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 267 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
266 pages_to_bytes(i.totalram)); 268 pages_to_bytes(i.totalram));
267 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 269 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
268 pages_to_bytes(available)); 270 pages_to_bytes(available));
271
272 return idx;
269} 273}
270 274
271/* 275/*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
291{ 295{
292 struct virtqueue *vq; 296 struct virtqueue *vq;
293 struct scatterlist sg; 297 struct scatterlist sg;
294 unsigned int len; 298 unsigned int len, num_stats;
295 299
296 update_balloon_stats(vb); 300 num_stats = update_balloon_stats(vb);
297 301
298 vq = vb->stats_vq; 302 vq = vb->stats_vq;
299 if (!virtqueue_get_buf(vq, &len)) 303 if (!virtqueue_get_buf(vq, &len))
300 return; 304 return;
301 sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 305 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
302 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 306 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
303 virtqueue_kick(vq); 307 virtqueue_kick(vq);
304} 308}
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
423 vb->deflate_vq = vqs[1]; 427 vb->deflate_vq = vqs[1];
424 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 428 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
425 struct scatterlist sg; 429 struct scatterlist sg;
430 unsigned int num_stats;
426 vb->stats_vq = vqs[2]; 431 vb->stats_vq = vqs[2];
427 432
428 /* 433 /*
429 * Prime this virtqueue with one buffer so the hypervisor can 434 * Prime this virtqueue with one buffer so the hypervisor can
430 * use it to signal us later (it can't be broken yet!). 435 * use it to signal us later (it can't be broken yet!).
431 */ 436 */
432 sg_init_one(&sg, vb->stats, sizeof vb->stats); 437 num_stats = update_balloon_stats(vb);
438
439 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
433 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) 440 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
434 < 0) 441 < 0)
435 BUG(); 442 BUG();
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index df548a6fb844..698d5d06fa03 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
33 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 33 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
34 int i; 34 int i;
35 35
36 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); 36 if (vp_dev->intx_enabled)
37 for (i = 1; i < vp_dev->msix_vectors; i++) 37 synchronize_irq(vp_dev->pci_dev->irq);
38
39 for (i = 0; i < vp_dev->msix_vectors; ++i)
38 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); 40 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
39} 41}
40 42
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
60static irqreturn_t vp_vring_interrupt(int irq, void *opaque) 62static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
61{ 63{
62 struct virtio_pci_device *vp_dev = opaque; 64 struct virtio_pci_device *vp_dev = opaque;
65 struct virtio_pci_vq_info *info;
63 irqreturn_t ret = IRQ_NONE; 66 irqreturn_t ret = IRQ_NONE;
64 struct virtqueue *vq; 67 unsigned long flags;
65 68
66 list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { 69 spin_lock_irqsave(&vp_dev->lock, flags);
67 if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) 70 list_for_each_entry(info, &vp_dev->virtqueues, node) {
71 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
68 ret = IRQ_HANDLED; 72 ret = IRQ_HANDLED;
69 } 73 }
74 spin_unlock_irqrestore(&vp_dev->lock, flags);
70 75
71 return ret; 76 return ret;
72} 77}
@@ -97,185 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
97 return vp_vring_interrupt(irq, opaque); 102 return vp_vring_interrupt(irq, opaque);
98} 103}
99 104
100static void vp_remove_vqs(struct virtio_device *vdev) 105static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
106 bool per_vq_vectors, struct irq_affinity *desc)
101{ 107{
102 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 108 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
103 struct virtqueue *vq, *n; 109 const char *name = dev_name(&vp_dev->vdev.dev);
110 unsigned i, v;
111 int err = -ENOMEM;
104 112
105 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { 113 vp_dev->msix_vectors = nvectors;
106 if (vp_dev->msix_vector_map) {
107 int v = vp_dev->msix_vector_map[vq->index];
108 114
109 if (v != VIRTIO_MSI_NO_VECTOR) 115 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
110 free_irq(pci_irq_vector(vp_dev->pci_dev, v), 116 GFP_KERNEL);
111 vq); 117 if (!vp_dev->msix_names)
112 } 118 goto error;
113 vp_dev->del_vq(vq); 119 vp_dev->msix_affinity_masks
120 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
121 GFP_KERNEL);
122 if (!vp_dev->msix_affinity_masks)
123 goto error;
124 for (i = 0; i < nvectors; ++i)
125 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
126 GFP_KERNEL))
127 goto error;
128
129 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
130 nvectors, PCI_IRQ_MSIX |
131 (desc ? PCI_IRQ_AFFINITY : 0),
132 desc);
133 if (err < 0)
134 goto error;
135 vp_dev->msix_enabled = 1;
136
137 /* Set the vector used for configuration */
138 v = vp_dev->msix_used_vectors;
139 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
140 "%s-config", name);
141 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
142 vp_config_changed, 0, vp_dev->msix_names[v],
143 vp_dev);
144 if (err)
145 goto error;
146 ++vp_dev->msix_used_vectors;
147
148 v = vp_dev->config_vector(vp_dev, v);
149 /* Verify we had enough resources to assign the vector */
150 if (v == VIRTIO_MSI_NO_VECTOR) {
151 err = -EBUSY;
152 goto error;
114 } 153 }
154
155 if (!per_vq_vectors) {
156 /* Shared vector for all VQs */
157 v = vp_dev->msix_used_vectors;
158 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
159 "%s-virtqueues", name);
160 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
161 vp_vring_interrupt, 0, vp_dev->msix_names[v],
162 vp_dev);
163 if (err)
164 goto error;
165 ++vp_dev->msix_used_vectors;
166 }
167 return 0;
168error:
169 return err;
170}
171
172static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
173 void (*callback)(struct virtqueue *vq),
174 const char *name,
175 u16 msix_vec)
176{
177 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
178 struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
179 struct virtqueue *vq;
180 unsigned long flags;
181
182 /* fill out our structure that represents an active queue */
183 if (!info)
184 return ERR_PTR(-ENOMEM);
185
186 vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
187 msix_vec);
188 if (IS_ERR(vq))
189 goto out_info;
190
191 info->vq = vq;
192 if (callback) {
193 spin_lock_irqsave(&vp_dev->lock, flags);
194 list_add(&info->node, &vp_dev->virtqueues);
195 spin_unlock_irqrestore(&vp_dev->lock, flags);
196 } else {
197 INIT_LIST_HEAD(&info->node);
198 }
199
200 vp_dev->vqs[index] = info;
201 return vq;
202
203out_info:
204 kfree(info);
205 return vq;
206}
207
208static void vp_del_vq(struct virtqueue *vq)
209{
210 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
211 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
212 unsigned long flags;
213
214 spin_lock_irqsave(&vp_dev->lock, flags);
215 list_del(&info->node);
216 spin_unlock_irqrestore(&vp_dev->lock, flags);
217
218 vp_dev->del_vq(info);
219 kfree(info);
115} 220}
116 221
117/* the config->del_vqs() implementation */ 222/* the config->del_vqs() implementation */
118void vp_del_vqs(struct virtio_device *vdev) 223void vp_del_vqs(struct virtio_device *vdev)
119{ 224{
120 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 225 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
226 struct virtqueue *vq, *n;
121 int i; 227 int i;
122 228
123 if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) 229 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
124 return; 230 if (vp_dev->per_vq_vectors) {
231 int v = vp_dev->vqs[vq->index]->msix_vector;
125 232
126 vp_remove_vqs(vdev); 233 if (v != VIRTIO_MSI_NO_VECTOR) {
234 int irq = pci_irq_vector(vp_dev->pci_dev, v);
235
236 irq_set_affinity_hint(irq, NULL);
237 free_irq(irq, vq);
238 }
239 }
240 vp_del_vq(vq);
241 }
242 vp_dev->per_vq_vectors = false;
243
244 if (vp_dev->intx_enabled) {
245 free_irq(vp_dev->pci_dev->irq, vp_dev);
246 vp_dev->intx_enabled = 0;
247 }
127 248
128 if (vp_dev->pci_dev->msix_enabled) { 249 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
129 for (i = 0; i < vp_dev->msix_vectors; i++) 250 free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
251
252 for (i = 0; i < vp_dev->msix_vectors; i++)
253 if (vp_dev->msix_affinity_masks[i])
130 free_cpumask_var(vp_dev->msix_affinity_masks[i]); 254 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
131 255
256 if (vp_dev->msix_enabled) {
132 /* Disable the vector used for configuration */ 257 /* Disable the vector used for configuration */
133 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); 258 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
134 259
135 kfree(vp_dev->msix_affinity_masks); 260 pci_free_irq_vectors(vp_dev->pci_dev);
136 kfree(vp_dev->msix_names); 261 vp_dev->msix_enabled = 0;
137 kfree(vp_dev->msix_vector_map);
138 } 262 }
139 263
140 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); 264 vp_dev->msix_vectors = 0;
141 pci_free_irq_vectors(vp_dev->pci_dev); 265 vp_dev->msix_used_vectors = 0;
266 kfree(vp_dev->msix_names);
267 vp_dev->msix_names = NULL;
268 kfree(vp_dev->msix_affinity_masks);
269 vp_dev->msix_affinity_masks = NULL;
270 kfree(vp_dev->vqs);
271 vp_dev->vqs = NULL;
142} 272}
143 273
144static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, 274static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
145 struct virtqueue *vqs[], vq_callback_t *callbacks[], 275 struct virtqueue *vqs[], vq_callback_t *callbacks[],
146 const char * const names[], struct irq_affinity *desc) 276 const char * const names[], bool per_vq_vectors,
277 struct irq_affinity *desc)
147{ 278{
148 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 279 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
149 const char *name = dev_name(&vp_dev->vdev.dev);
150 int i, err = -ENOMEM, allocated_vectors, nvectors;
151 unsigned flags = PCI_IRQ_MSIX;
152 bool shared = false;
153 u16 msix_vec; 280 u16 msix_vec;
281 int i, err, nvectors, allocated_vectors;
154 282
155 if (desc) { 283 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
156 flags |= PCI_IRQ_AFFINITY; 284 if (!vp_dev->vqs)
157 desc->pre_vectors++; /* virtio config vector */ 285 return -ENOMEM;
158 }
159
160 nvectors = 1;
161 for (i = 0; i < nvqs; i++)
162 if (callbacks[i])
163 nvectors++;
164
165 /* Try one vector per queue first. */
166 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
167 nvectors, flags, desc);
168 if (err < 0) {
169 /* Fallback to one vector for config, one shared for queues. */
170 shared = true;
171 err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
172 PCI_IRQ_MSIX);
173 if (err < 0)
174 return err;
175 }
176 if (err < 0)
177 return err;
178
179 vp_dev->msix_vectors = nvectors;
180 vp_dev->msix_names = kmalloc_array(nvectors,
181 sizeof(*vp_dev->msix_names), GFP_KERNEL);
182 if (!vp_dev->msix_names)
183 goto out_free_irq_vectors;
184
185 vp_dev->msix_affinity_masks = kcalloc(nvectors,
186 sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
187 if (!vp_dev->msix_affinity_masks)
188 goto out_free_msix_names;
189 286
190 for (i = 0; i < nvectors; ++i) { 287 if (per_vq_vectors) {
191 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], 288 /* Best option: one for change interrupt, one per vq. */
192 GFP_KERNEL)) 289 nvectors = 1;
193 goto out_free_msix_affinity_masks; 290 for (i = 0; i < nvqs; ++i)
291 if (callbacks[i])
292 ++nvectors;
293 } else {
294 /* Second best: one for change, shared for all vqs. */
295 nvectors = 2;
194 } 296 }
195 297
196 /* Set the vector used for configuration */ 298 err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
197 snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), 299 per_vq_vectors ? desc : NULL);
198 "%s-config", name);
199 err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
200 0, vp_dev->msix_names[0], vp_dev);
201 if (err) 300 if (err)
202 goto out_free_msix_affinity_masks; 301 goto error_find;
203 302
204 /* Verify we had enough resources to assign the vector */ 303 vp_dev->per_vq_vectors = per_vq_vectors;
205 if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { 304 allocated_vectors = vp_dev->msix_used_vectors;
206 err = -EBUSY;
207 goto out_free_config_irq;
208 }
209
210 vp_dev->msix_vector_map = kmalloc_array(nvqs,
211 sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
212 if (!vp_dev->msix_vector_map)
213 goto out_disable_config_irq;
214
215 allocated_vectors = 1; /* vector 0 is the config interrupt */
216 for (i = 0; i < nvqs; ++i) { 305 for (i = 0; i < nvqs; ++i) {
217 if (!names[i]) { 306 if (!names[i]) {
218 vqs[i] = NULL; 307 vqs[i] = NULL;
219 continue; 308 continue;
220 } 309 }
221 310
222 if (callbacks[i]) 311 if (!callbacks[i])
223 msix_vec = allocated_vectors;
224 else
225 msix_vec = VIRTIO_MSI_NO_VECTOR; 312 msix_vec = VIRTIO_MSI_NO_VECTOR;
226 313 else if (vp_dev->per_vq_vectors)
227 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], 314 msix_vec = allocated_vectors++;
228 msix_vec); 315 else
316 msix_vec = VP_MSIX_VQ_VECTOR;
317 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
318 msix_vec);
229 if (IS_ERR(vqs[i])) { 319 if (IS_ERR(vqs[i])) {
230 err = PTR_ERR(vqs[i]); 320 err = PTR_ERR(vqs[i]);
231 goto out_remove_vqs; 321 goto error_find;
232 } 322 }
233 323
234 if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 324 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
235 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
236 continue; 325 continue;
237 }
238 326
239 snprintf(vp_dev->msix_names[i + 1], 327 /* allocate per-vq irq if available and necessary */
240 sizeof(*vp_dev->msix_names), "%s-%s", 328 snprintf(vp_dev->msix_names[msix_vec],
329 sizeof *vp_dev->msix_names,
330 "%s-%s",
241 dev_name(&vp_dev->vdev.dev), names[i]); 331 dev_name(&vp_dev->vdev.dev), names[i]);
242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 332 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
243 vring_interrupt, IRQF_SHARED, 333 vring_interrupt, 0,
244 vp_dev->msix_names[i + 1], vqs[i]); 334 vp_dev->msix_names[msix_vec],
245 if (err) { 335 vqs[i]);
246 /* don't free this irq on error */ 336 if (err)
247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 337 goto error_find;
248 goto out_remove_vqs;
249 }
250 vp_dev->msix_vector_map[i] = msix_vec;
251
252 /*
253 * Use a different vector for each queue if they are available,
254 * else share the same vector for all VQs.
255 */
256 if (!shared)
257 allocated_vectors++;
258 } 338 }
259
260 return 0; 339 return 0;
261 340
262out_remove_vqs: 341error_find:
263 vp_remove_vqs(vdev); 342 vp_del_vqs(vdev);
264 kfree(vp_dev->msix_vector_map);
265out_disable_config_irq:
266 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
267out_free_config_irq:
268 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
269out_free_msix_affinity_masks:
270 for (i = 0; i < nvectors; i++) {
271 if (vp_dev->msix_affinity_masks[i])
272 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
273 }
274 kfree(vp_dev->msix_affinity_masks);
275out_free_msix_names:
276 kfree(vp_dev->msix_names);
277out_free_irq_vectors:
278 pci_free_irq_vectors(vp_dev->pci_dev);
279 return err; 343 return err;
280} 344}
281 345
@@ -286,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 350 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
287 int i, err; 351 int i, err;
288 352
353 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
354 if (!vp_dev->vqs)
355 return -ENOMEM;
356
289 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, 357 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
290 dev_name(&vdev->dev), vp_dev); 358 dev_name(&vdev->dev), vp_dev);
291 if (err) 359 if (err)
292 return err; 360 goto out_del_vqs;
293 361
362 vp_dev->intx_enabled = 1;
363 vp_dev->per_vq_vectors = false;
294 for (i = 0; i < nvqs; ++i) { 364 for (i = 0; i < nvqs; ++i) {
295 if (!names[i]) { 365 if (!names[i]) {
296 vqs[i] = NULL; 366 vqs[i] = NULL;
297 continue; 367 continue;
298 } 368 }
299 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], 369 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
300 VIRTIO_MSI_NO_VECTOR); 370 VIRTIO_MSI_NO_VECTOR);
301 if (IS_ERR(vqs[i])) { 371 if (IS_ERR(vqs[i])) {
302 err = PTR_ERR(vqs[i]); 372 err = PTR_ERR(vqs[i]);
303 goto out_remove_vqs; 373 goto out_del_vqs;
304 } 374 }
305 } 375 }
306 376
307 return 0; 377 return 0;
308 378out_del_vqs:
309out_remove_vqs: 379 vp_del_vqs(vdev);
310 vp_remove_vqs(vdev);
311 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
312 return err; 380 return err;
313} 381}
314 382
@@ -319,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
319{ 387{
320 int err; 388 int err;
321 389
322 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); 390 /* Try MSI-X with one vector per queue. */
391 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
323 if (!err) 392 if (!err)
324 return 0; 393 return 0;
394 /* Fallback: MSI-X with one vector for config, one shared for queues. */
395 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
396 if (!err)
397 return 0;
398 /* Finally fall back to regular interrupts. */
325 return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); 399 return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
326} 400}
327 401
@@ -341,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
341{ 415{
342 struct virtio_device *vdev = vq->vdev; 416 struct virtio_device *vdev = vq->vdev;
343 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 417 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
418 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
419 struct cpumask *mask;
420 unsigned int irq;
344 421
345 if (!vq->callback) 422 if (!vq->callback)
346 return -EINVAL; 423 return -EINVAL;
347 424
348 if (vp_dev->pci_dev->msix_enabled) { 425 if (vp_dev->msix_enabled) {
349 int vec = vp_dev->msix_vector_map[vq->index]; 426 mask = vp_dev->msix_affinity_masks[info->msix_vector];
350 struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; 427 irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
351 unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
352
353 if (cpu == -1) 428 if (cpu == -1)
354 irq_set_affinity_hint(irq, NULL); 429 irq_set_affinity_hint(irq, NULL);
355 else { 430 else {
@@ -364,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
364const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) 439const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
365{ 440{
366 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 441 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
367 unsigned int *map = vp_dev->msix_vector_map;
368 442
369 if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) 443 if (!vp_dev->per_vq_vectors ||
444 vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
370 return NULL; 445 return NULL;
371 446
372 return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); 447 return pci_irq_get_affinity(vp_dev->pci_dev,
448 vp_dev->vqs[index]->msix_vector);
373} 449}
374 450
375#ifdef CONFIG_PM_SLEEP 451#ifdef CONFIG_PM_SLEEP
@@ -440,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
440 vp_dev->vdev.dev.parent = &pci_dev->dev; 516 vp_dev->vdev.dev.parent = &pci_dev->dev;
441 vp_dev->vdev.dev.release = virtio_pci_release_dev; 517 vp_dev->vdev.dev.release = virtio_pci_release_dev;
442 vp_dev->pci_dev = pci_dev; 518 vp_dev->pci_dev = pci_dev;
519 INIT_LIST_HEAD(&vp_dev->virtqueues);
520 spin_lock_init(&vp_dev->lock);
443 521
444 /* enable the device */ 522 /* enable the device */
445 rc = pci_enable_device(pci_dev); 523 rc = pci_enable_device(pci_dev);
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index ac8c9d788964..e96334aec1e0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,6 +31,17 @@
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33 33
34struct virtio_pci_vq_info {
35 /* the actual virtqueue */
36 struct virtqueue *vq;
37
38 /* the list node for the virtqueues list */
39 struct list_head node;
40
41 /* MSI-X vector (or none) */
42 unsigned msix_vector;
43};
44
34/* Our device structure */ 45/* Our device structure */
35struct virtio_pci_device { 46struct virtio_pci_device {
36 struct virtio_device vdev; 47 struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
64 /* the IO mapping for the PCI config space */ 75 /* the IO mapping for the PCI config space */
65 void __iomem *ioaddr; 76 void __iomem *ioaddr;
66 77
78 /* a list of queues so we can dispatch IRQs */
79 spinlock_t lock;
80 struct list_head virtqueues;
81
82 /* array of all queues for house-keeping */
83 struct virtio_pci_vq_info **vqs;
84
85 /* MSI-X support */
86 int msix_enabled;
87 int intx_enabled;
67 cpumask_var_t *msix_affinity_masks; 88 cpumask_var_t *msix_affinity_masks;
68 /* Name strings for interrupts. This size should be enough, 89 /* Name strings for interrupts. This size should be enough,
69 * and I'm too lazy to allocate each name separately. */ 90 * and I'm too lazy to allocate each name separately. */
70 char (*msix_names)[256]; 91 char (*msix_names)[256];
71 /* Total Number of MSI-X vectors (including per-VQ ones). */ 92 /* Number of available vectors */
72 int msix_vectors; 93 unsigned msix_vectors;
73 /* Map of per-VQ MSI-X vectors, may be NULL */ 94 /* Vectors allocated, excluding per-vq vectors if any */
74 unsigned *msix_vector_map; 95 unsigned msix_used_vectors;
96
97 /* Whether we have vector per vq */
98 bool per_vq_vectors;
75 99
76 struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, 100 struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
101 struct virtio_pci_vq_info *info,
77 unsigned idx, 102 unsigned idx,
78 void (*callback)(struct virtqueue *vq), 103 void (*callback)(struct virtqueue *vq),
79 const char *name, 104 const char *name,
80 u16 msix_vec); 105 u16 msix_vec);
81 void (*del_vq)(struct virtqueue *vq); 106 void (*del_vq)(struct virtio_pci_vq_info *info);
82 107
83 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); 108 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
84}; 109};
85 110
111/* Constants for MSI-X */
112/* Use first vector for configuration changes, second and the rest for
113 * virtqueues Thus, we need at least 2 vectors for MSI. */
114enum {
115 VP_MSIX_CONFIG_VECTOR = 0,
116 VP_MSIX_VQ_VECTOR = 1,
117};
118
86/* Convert a generic virtio device to our structure */ 119/* Convert a generic virtio device to our structure */
87static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) 120static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
88{ 121{
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index f7362c5fe18a..4bfa48fb1324 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
112} 112}
113 113
114static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 114static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
115 struct virtio_pci_vq_info *info,
115 unsigned index, 116 unsigned index,
116 void (*callback)(struct virtqueue *vq), 117 void (*callback)(struct virtqueue *vq),
117 const char *name, 118 const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
129 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) 130 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
130 return ERR_PTR(-ENOENT); 131 return ERR_PTR(-ENOENT);
131 132
133 info->msix_vector = msix_vec;
134
132 /* create the vring */ 135 /* create the vring */
133 vq = vring_create_virtqueue(index, num, 136 vq = vring_create_virtqueue(index, num,
134 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, 137 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
159 return ERR_PTR(err); 162 return ERR_PTR(err);
160} 163}
161 164
162static void del_vq(struct virtqueue *vq) 165static void del_vq(struct virtio_pci_vq_info *info)
163{ 166{
167 struct virtqueue *vq = info->vq;
164 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 168 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
165 169
166 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); 170 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
167 171
168 if (vp_dev->pci_dev->msix_enabled) { 172 if (vp_dev->msix_enabled) {
169 iowrite16(VIRTIO_MSI_NO_VECTOR, 173 iowrite16(VIRTIO_MSI_NO_VECTOR,
170 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); 174 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
171 /* Flush the write out to device */ 175 /* Flush the write out to device */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7bc3004b840e..8978f109d2d7 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
293} 293}
294 294
295static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 295static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
296 struct virtio_pci_vq_info *info,
296 unsigned index, 297 unsigned index,
297 void (*callback)(struct virtqueue *vq), 298 void (*callback)(struct virtqueue *vq),
298 const char *name, 299 const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
322 /* get offset of notification word for this vq */ 323 /* get offset of notification word for this vq */
323 off = vp_ioread16(&cfg->queue_notify_off); 324 off = vp_ioread16(&cfg->queue_notify_off);
324 325
326 info->msix_vector = msix_vec;
327
325 /* create the vring */ 328 /* create the vring */
326 vq = vring_create_virtqueue(index, num, 329 vq = vring_create_virtqueue(index, num,
327 SMP_CACHE_BYTES, &vp_dev->vdev, 330 SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
405 return 0; 408 return 0;
406} 409}
407 410
408static void del_vq(struct virtqueue *vq) 411static void del_vq(struct virtio_pci_vq_info *info)
409{ 412{
413 struct virtqueue *vq = info->vq;
410 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 414 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
411 415
412 vp_iowrite16(vq->index, &vp_dev->common->queue_select); 416 vp_iowrite16(vq->index, &vp_dev->common->queue_select);
413 417
414 if (vp_dev->pci_dev->msix_enabled) { 418 if (vp_dev->msix_enabled) {
415 vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 419 vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
416 &vp_dev->common->queue_msix_vector); 420 &vp_dev->common->queue_msix_vector);
417 /* Flush the write out to device */ 421 /* Flush the write out to device */
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bcca18b..23e391d3ec01 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/syscore_ops.h>
30#include <linux/acpi.h> 31#include <linux/acpi.h>
31#include <acpi/processor.h> 32#include <acpi/processor.h>
32#include <xen/xen.h> 33#include <xen/xen.h>
33#include <xen/xen-ops.h>
34#include <xen/interface/platform.h> 34#include <xen/interface/platform.h>
35#include <asm/xen/hypercall.h> 35#include <asm/xen/hypercall.h>
36 36
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
409 ACPI_UINT32_MAX, 409 ACPI_UINT32_MAX,
410 read_acpi_id, NULL, NULL, NULL); 410 read_acpi_id, NULL, NULL, NULL);
411 acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); 411 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
412 412
413upload: 413upload:
414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { 414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
466 return rc; 466 return rc;
467} 467}
468 468
469static int xen_acpi_processor_resume(struct notifier_block *nb, 469static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
470 unsigned long action, void *data)
471{ 470{
471 int rc;
472
472 bitmap_zero(acpi_ids_done, nr_acpi_bits); 473 bitmap_zero(acpi_ids_done, nr_acpi_bits);
473 return xen_upload_processor_pm_data(); 474
475 rc = xen_upload_processor_pm_data();
476 if (rc != 0)
477 pr_info("ACPI data upload failed, error = %d\n", rc);
478}
479
480static void xen_acpi_processor_resume(void)
481{
482 static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
483
484 /*
485 * xen_upload_processor_pm_data() calls non-atomic code.
486 * However, the context for xen_acpi_processor_resume is syscore
487 * with only the boot CPU online and in an atomic context.
488 *
489 * So defer the upload for some point safer.
490 */
491 schedule_work(&wq);
474} 492}
475 493
476struct notifier_block xen_acpi_processor_resume_nb = { 494static struct syscore_ops xap_syscore_ops = {
477 .notifier_call = xen_acpi_processor_resume, 495 .resume = xen_acpi_processor_resume,
478}; 496};
479 497
480static int __init xen_acpi_processor_init(void) 498static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
527 if (rc) 545 if (rc)
528 goto err_unregister; 546 goto err_unregister;
529 547
530 xen_resume_notifier_register(&xen_acpi_processor_resume_nb); 548 register_syscore_ops(&xap_syscore_ops);
531 549
532 return 0; 550 return 0;
533err_unregister: 551err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
544{ 562{
545 int i; 563 int i;
546 564
547 xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); 565 unregister_syscore_ops(&xap_syscore_ops);
548 kfree(acpi_ids_done); 566 kfree(acpi_ids_done);
549 kfree(acpi_id_present); 567 kfree(acpi_id_present);
550 kfree(acpi_id_cst_present); 568 kfree(acpi_id_cst_present);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1f4733b80c87..f3b089b7c0b6 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
442 return xenbus_command_reply(u, XS_ERROR, "ENOENT"); 442 return xenbus_command_reply(u, XS_ERROR, "ENOENT");
443 443
444 rc = xenbus_dev_request_and_reply(&u->u.msg, u); 444 rc = xenbus_dev_request_and_reply(&u->u.msg, u);
445 if (rc) 445 if (rc && trans) {
446 list_del(&trans->list);
446 kfree(trans); 447 kfree(trans);
448 }
447 449
448out: 450out:
449 return rc; 451 return rc;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc28c607..c4115901d906 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,7 +1259,7 @@ struct btrfs_root {
1259 atomic_t will_be_snapshoted; 1259 atomic_t will_be_snapshoted;
1260 1260
1261 /* For qgroup metadata space reserve */ 1261 /* For qgroup metadata space reserve */
1262 atomic_t qgroup_meta_rsv; 1262 atomic64_t qgroup_meta_rsv;
1263}; 1263};
1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode) 1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
1265{ 1265{
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d0..eb1ee7b6f532 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1342 atomic_set(&root->orphan_inodes, 0); 1342 atomic_set(&root->orphan_inodes, 0);
1343 atomic_set(&root->refs, 1); 1343 atomic_set(&root->refs, 1);
1344 atomic_set(&root->will_be_snapshoted, 0); 1344 atomic_set(&root->will_be_snapshoted, 0);
1345 atomic_set(&root->qgroup_meta_rsv, 0); 1345 atomic64_set(&root->qgroup_meta_rsv, 0);
1346 root->log_transid = 0; 1346 root->log_transid = 0;
1347 root->log_transid_committed = -1; 1347 root->log_transid_committed = -1;
1348 root->last_log_commit = 0; 1348 root->last_log_commit = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 28e81922a21c..27fdb250b446 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
1714 * can we find nothing at @index. 1714 * can we find nothing at @index.
1715 */ 1715 */
1716 ASSERT(page_ops & PAGE_LOCK); 1716 ASSERT(page_ops & PAGE_LOCK);
1717 return ret; 1717 err = -EAGAIN;
1718 goto out;
1718 } 1719 }
1719 1720
1720 for (i = 0; i < ret; i++) { 1721 for (i = 0; i < ret; i++) {
@@ -2583,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
2583 2584
2584 if (tree->ops) { 2585 if (tree->ops) {
2585 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2586 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2586 if (!ret && !bio->bi_error) 2587 if (ret == -EAGAIN) {
2587 uptodate = 1; 2588 /*
2588 } else { 2589 * Data inode's readpage_io_failed_hook() always
2590 * returns -EAGAIN.
2591 *
2592 * The generic bio_readpage_error handles errors
2593 * the following way: If possible, new read
2594 * requests are created and submitted and will
2595 * end up in end_bio_extent_readpage as well (if
2596 * we're lucky, not in the !uptodate case). In
2597 * that case it returns 0 and we just go on with
2598 * the next page in our bio. If it can't handle
2599 * the error it will return -EIO and we remain
2600 * responsible for that page.
2601 */
2602 ret = bio_readpage_error(bio, offset, page,
2603 start, end, mirror);
2604 if (ret == 0) {
2605 uptodate = !bio->bi_error;
2606 offset += len;
2607 continue;
2608 }
2609 }
2610
2589 /* 2611 /*
2590 * The generic bio_readpage_error handles errors the 2612 * metadata's readpage_io_failed_hook() always returns
2591 * following way: If possible, new read requests are 2613 * -EIO and fixes nothing. -EIO is also returned if
2592 * created and submitted and will end up in 2614 * data inode error could not be fixed.
2593 * end_bio_extent_readpage as well (if we're lucky, not
2594 * in the !uptodate case). In that case it returns 0 and
2595 * we just go on with the next page in our bio. If it
2596 * can't handle the error it will return -EIO and we
2597 * remain responsible for that page.
2598 */ 2615 */
2599 ret = bio_readpage_error(bio, offset, page, start, end, 2616 ASSERT(ret == -EIO);
2600 mirror);
2601 if (ret == 0) {
2602 uptodate = !bio->bi_error;
2603 offset += len;
2604 continue;
2605 }
2606 } 2617 }
2607readpage_ok: 2618readpage_ok:
2608 if (likely(uptodate)) { 2619 if (likely(uptodate)) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c40060cc481f..5e71f1ea3391 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
6709 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6709 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6710 ret = btrfs_decompress(compress_type, tmp, page, 6710 ret = btrfs_decompress(compress_type, tmp, page,
6711 extent_offset, inline_size, max_size); 6711 extent_offset, inline_size, max_size);
6712
6713 /*
6714 * decompression code contains a memset to fill in any space between the end
6715 * of the uncompressed data and the end of max_size in case the decompressed
6716 * data ends up shorter than ram_bytes. That doesn't cover the hole between
6717 * the end of an inline extent and the beginning of the next block, so we
6718 * cover that region here.
6719 */
6720
6721 if (max_size + pg_offset < PAGE_SIZE) {
6722 char *map = kmap(page);
6723 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
6724 kunmap(page);
6725 }
6712 kfree(tmp); 6726 kfree(tmp);
6713 return ret; 6727 return ret;
6714} 6728}
@@ -7896,7 +7910,6 @@ struct btrfs_retry_complete {
7896static void btrfs_retry_endio_nocsum(struct bio *bio) 7910static void btrfs_retry_endio_nocsum(struct bio *bio)
7897{ 7911{
7898 struct btrfs_retry_complete *done = bio->bi_private; 7912 struct btrfs_retry_complete *done = bio->bi_private;
7899 struct inode *inode;
7900 struct bio_vec *bvec; 7913 struct bio_vec *bvec;
7901 int i; 7914 int i;
7902 7915
@@ -7904,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
7904 goto end; 7917 goto end;
7905 7918
7906 ASSERT(bio->bi_vcnt == 1); 7919 ASSERT(bio->bi_vcnt == 1);
7907 inode = bio->bi_io_vec->bv_page->mapping->host; 7920 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
7908 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
7909 7921
7910 done->uptodate = 1; 7922 done->uptodate = 1;
7911 bio_for_each_segment_all(bvec, bio, i) 7923 bio_for_each_segment_all(bvec, bio, i)
7912 clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); 7924 clean_io_failure(BTRFS_I(done->inode), done->start,
7925 bvec->bv_page, 0);
7913end: 7926end:
7914 complete(&done->done); 7927 complete(&done->done);
7915 bio_put(bio); 7928 bio_put(bio);
@@ -7959,8 +7972,10 @@ next_block_or_try_again:
7959 7972
7960 start += sectorsize; 7973 start += sectorsize;
7961 7974
7962 if (nr_sectors--) { 7975 nr_sectors--;
7976 if (nr_sectors) {
7963 pgoff += sectorsize; 7977 pgoff += sectorsize;
7978 ASSERT(pgoff < PAGE_SIZE);
7964 goto next_block_or_try_again; 7979 goto next_block_or_try_again;
7965 } 7980 }
7966 } 7981 }
@@ -7972,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
7972{ 7987{
7973 struct btrfs_retry_complete *done = bio->bi_private; 7988 struct btrfs_retry_complete *done = bio->bi_private;
7974 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7989 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7975 struct inode *inode;
7976 struct bio_vec *bvec; 7990 struct bio_vec *bvec;
7977 u64 start;
7978 int uptodate; 7991 int uptodate;
7979 int ret; 7992 int ret;
7980 int i; 7993 int i;
@@ -7984,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
7984 7997
7985 uptodate = 1; 7998 uptodate = 1;
7986 7999
7987 start = done->start;
7988
7989 ASSERT(bio->bi_vcnt == 1); 8000 ASSERT(bio->bi_vcnt == 1);
7990 inode = bio->bi_io_vec->bv_page->mapping->host; 8001 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
7991 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
7992 8002
7993 bio_for_each_segment_all(bvec, bio, i) { 8003 bio_for_each_segment_all(bvec, bio, i) {
7994 ret = __readpage_endio_check(done->inode, io_bio, i, 8004 ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8066,8 +8076,10 @@ next:
8066 8076
8067 ASSERT(nr_sectors); 8077 ASSERT(nr_sectors);
8068 8078
8069 if (--nr_sectors) { 8079 nr_sectors--;
8080 if (nr_sectors) {
8070 pgoff += sectorsize; 8081 pgoff += sectorsize;
8082 ASSERT(pgoff < PAGE_SIZE);
8071 goto next_block; 8083 goto next_block;
8072 } 8084 }
8073 } 8085 }
@@ -10509,9 +10521,9 @@ out_inode:
10509} 10521}
10510 10522
10511__attribute__((const)) 10523__attribute__((const))
10512static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) 10524static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
10513{ 10525{
10514 return 0; 10526 return -EAGAIN;
10515} 10527}
10516 10528
10517static const struct inode_operations btrfs_dir_inode_operations = { 10529static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10556,7 +10568,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
10556 .submit_bio_hook = btrfs_submit_bio_hook, 10568 .submit_bio_hook = btrfs_submit_bio_hook,
10557 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 10569 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10558 .merge_bio_hook = btrfs_merge_bio_hook, 10570 .merge_bio_hook = btrfs_merge_bio_hook,
10559 .readpage_io_failed_hook = dummy_readpage_io_failed_hook, 10571 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
10560 10572
10561 /* optional callbacks */ 10573 /* optional callbacks */
10562 .fill_delalloc = run_delalloc_range, 10574 .fill_delalloc = run_delalloc_range,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5da750c1087..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
2948 ret = qgroup_reserve(root, num_bytes, enforce); 2948 ret = qgroup_reserve(root, num_bytes, enforce);
2949 if (ret < 0) 2949 if (ret < 0)
2950 return ret; 2950 return ret;
2951 atomic_add(num_bytes, &root->qgroup_meta_rsv); 2951 atomic64_add(num_bytes, &root->qgroup_meta_rsv);
2952 return ret; 2952 return ret;
2953} 2953}
2954 2954
2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root) 2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2956{ 2956{
2957 struct btrfs_fs_info *fs_info = root->fs_info; 2957 struct btrfs_fs_info *fs_info = root->fs_info;
2958 int reserved; 2958 u64 reserved;
2959 2959
2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2961 !is_fstree(root->objectid)) 2961 !is_fstree(root->objectid))
2962 return; 2962 return;
2963 2963
2964 reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); 2964 reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
2965 if (reserved == 0) 2965 if (reserved == 0)
2966 return; 2966 return;
2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); 2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2976 return; 2976 return;
2977 2977
2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2979 WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); 2979 WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
2980 atomic_sub(num_bytes, &root->qgroup_meta_rsv); 2980 atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); 2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
2982} 2982}
2983 2983
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 456c8901489b..a60d5bfb8a49 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6305 goto out; 6305 goto out;
6306 } 6306 }
6307 6307
6308 /*
6309 * Check that we don't overflow at later allocations, we request
6310 * clone_sources_count + 1 items, and compare to unsigned long inside
6311 * access_ok.
6312 */
6308 if (arg->clone_sources_count > 6313 if (arg->clone_sources_count >
6309 ULLONG_MAX / sizeof(*arg->clone_sources)) { 6314 ULONG_MAX / sizeof(struct clone_root) - 1) {
6310 ret = -EINVAL; 6315 ret = -EINVAL;
6311 goto out; 6316 goto out;
6312 } 6317 }
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc79cce..9530a333d302 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
549 case Opt_ssd: 549 case Opt_ssd:
550 btrfs_set_and_info(info, SSD, 550 btrfs_set_and_info(info, SSD,
551 "use ssd allocation scheme"); 551 "use ssd allocation scheme");
552 btrfs_clear_opt(info->mount_opt, NOSSD);
552 break; 553 break;
553 case Opt_ssd_spread: 554 case Opt_ssd_spread:
554 btrfs_set_and_info(info, SSD_SPREAD, 555 btrfs_set_and_info(info, SSD_SPREAD,
555 "use spread ssd allocation scheme"); 556 "use spread ssd allocation scheme");
556 btrfs_set_opt(info->mount_opt, SSD); 557 btrfs_set_opt(info->mount_opt, SSD);
558 btrfs_clear_opt(info->mount_opt, NOSSD);
557 break; 559 break;
558 case Opt_nossd: 560 case Opt_nossd:
559 btrfs_set_and_info(info, NOSSD, 561 btrfs_set_and_info(info, NOSSD,
560 "not using ssd allocation scheme"); 562 "not using ssd allocation scheme");
561 btrfs_clear_opt(info->mount_opt, SSD); 563 btrfs_clear_opt(info->mount_opt, SSD);
564 btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
562 break; 565 break;
563 case Opt_barrier: 566 case Opt_barrier:
564 btrfs_clear_and_info(info, NOBARRIER, 567 btrfs_clear_and_info(info, NOBARRIER,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73d56eef5e60..ab8a66d852f9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6213 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6213 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6214 dev = bbio->stripes[dev_nr].dev; 6214 dev = bbio->stripes[dev_nr].dev;
6215 if (!dev || !dev->bdev || 6215 if (!dev || !dev->bdev ||
6216 (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { 6216 (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
6217 bbio_error(bbio, first_bio, logical); 6217 bbio_error(bbio, first_bio, logical);
6218 continue; 6218 continue;
6219 } 6219 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15e1db8738ae..dd3f5fabfdf6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -972,6 +972,86 @@ out:
972 return rc; 972 return rc;
973} 973}
974 974
975ssize_t cifs_file_copychunk_range(unsigned int xid,
976 struct file *src_file, loff_t off,
977 struct file *dst_file, loff_t destoff,
978 size_t len, unsigned int flags)
979{
980 struct inode *src_inode = file_inode(src_file);
981 struct inode *target_inode = file_inode(dst_file);
982 struct cifsFileInfo *smb_file_src;
983 struct cifsFileInfo *smb_file_target;
984 struct cifs_tcon *src_tcon;
985 struct cifs_tcon *target_tcon;
986 ssize_t rc;
987
988 cifs_dbg(FYI, "copychunk range\n");
989
990 if (src_inode == target_inode) {
991 rc = -EINVAL;
992 goto out;
993 }
994
995 if (!src_file->private_data || !dst_file->private_data) {
996 rc = -EBADF;
997 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
998 goto out;
999 }
1000
1001 rc = -EXDEV;
1002 smb_file_target = dst_file->private_data;
1003 smb_file_src = src_file->private_data;
1004 src_tcon = tlink_tcon(smb_file_src->tlink);
1005 target_tcon = tlink_tcon(smb_file_target->tlink);
1006
1007 if (src_tcon->ses != target_tcon->ses) {
1008 cifs_dbg(VFS, "source and target of copy not on same server\n");
1009 goto out;
1010 }
1011
1012 /*
1013 * Note: cifs case is easier than btrfs since server responsible for
1014 * checks for proper open modes and file type and if it wants
1015 * server could even support copy of range where source = target
1016 */
1017 lock_two_nondirectories(target_inode, src_inode);
1018
1019 cifs_dbg(FYI, "about to flush pages\n");
1020 /* should we flush first and last page first */
1021 truncate_inode_pages(&target_inode->i_data, 0);
1022
1023 if (target_tcon->ses->server->ops->copychunk_range)
1024 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1025 smb_file_src, smb_file_target, off, len, destoff);
1026 else
1027 rc = -EOPNOTSUPP;
1028
1029 /* force revalidate of size and timestamps of target file now
1030 * that target is updated on the server
1031 */
1032 CIFS_I(target_inode)->time = 0;
1033 /* although unlocking in the reverse order from locking is not
1034 * strictly necessary here it is a little cleaner to be consistent
1035 */
1036 unlock_two_nondirectories(src_inode, target_inode);
1037
1038out:
1039 return rc;
1040}
1041
1042static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1043 struct file *dst_file, loff_t destoff,
1044 size_t len, unsigned int flags)
1045{
1046 unsigned int xid = get_xid();
1047 ssize_t rc;
1048
1049 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1050 len, flags);
1051 free_xid(xid);
1052 return rc;
1053}
1054
975const struct file_operations cifs_file_ops = { 1055const struct file_operations cifs_file_ops = {
976 .read_iter = cifs_loose_read_iter, 1056 .read_iter = cifs_loose_read_iter,
977 .write_iter = cifs_file_write_iter, 1057 .write_iter = cifs_file_write_iter,
@@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = {
984 .splice_read = generic_file_splice_read, 1064 .splice_read = generic_file_splice_read,
985 .llseek = cifs_llseek, 1065 .llseek = cifs_llseek,
986 .unlocked_ioctl = cifs_ioctl, 1066 .unlocked_ioctl = cifs_ioctl,
1067 .copy_file_range = cifs_copy_file_range,
987 .clone_file_range = cifs_clone_file_range, 1068 .clone_file_range = cifs_clone_file_range,
988 .setlease = cifs_setlease, 1069 .setlease = cifs_setlease,
989 .fallocate = cifs_fallocate, 1070 .fallocate = cifs_fallocate,
@@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = {
1001 .splice_read = generic_file_splice_read, 1082 .splice_read = generic_file_splice_read,
1002 .llseek = cifs_llseek, 1083 .llseek = cifs_llseek,
1003 .unlocked_ioctl = cifs_ioctl, 1084 .unlocked_ioctl = cifs_ioctl,
1085 .copy_file_range = cifs_copy_file_range,
1004 .clone_file_range = cifs_clone_file_range, 1086 .clone_file_range = cifs_clone_file_range,
1005 .setlease = cifs_setlease, 1087 .setlease = cifs_setlease,
1006 .fallocate = cifs_fallocate, 1088 .fallocate = cifs_fallocate,
@@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = {
1018 .mmap = cifs_file_mmap, 1100 .mmap = cifs_file_mmap,
1019 .splice_read = generic_file_splice_read, 1101 .splice_read = generic_file_splice_read,
1020 .unlocked_ioctl = cifs_ioctl, 1102 .unlocked_ioctl = cifs_ioctl,
1103 .copy_file_range = cifs_copy_file_range,
1021 .clone_file_range = cifs_clone_file_range, 1104 .clone_file_range = cifs_clone_file_range,
1022 .llseek = cifs_llseek, 1105 .llseek = cifs_llseek,
1023 .setlease = cifs_setlease, 1106 .setlease = cifs_setlease,
@@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = {
1035 .splice_read = generic_file_splice_read, 1118 .splice_read = generic_file_splice_read,
1036 .llseek = cifs_llseek, 1119 .llseek = cifs_llseek,
1037 .unlocked_ioctl = cifs_ioctl, 1120 .unlocked_ioctl = cifs_ioctl,
1121 .copy_file_range = cifs_copy_file_range,
1038 .clone_file_range = cifs_clone_file_range, 1122 .clone_file_range = cifs_clone_file_range,
1039 .setlease = cifs_setlease, 1123 .setlease = cifs_setlease,
1040 .fallocate = cifs_fallocate, 1124 .fallocate = cifs_fallocate,
@@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
1051 .splice_read = generic_file_splice_read, 1135 .splice_read = generic_file_splice_read,
1052 .llseek = cifs_llseek, 1136 .llseek = cifs_llseek,
1053 .unlocked_ioctl = cifs_ioctl, 1137 .unlocked_ioctl = cifs_ioctl,
1138 .copy_file_range = cifs_copy_file_range,
1054 .clone_file_range = cifs_clone_file_range, 1139 .clone_file_range = cifs_clone_file_range,
1055 .setlease = cifs_setlease, 1140 .setlease = cifs_setlease,
1056 .fallocate = cifs_fallocate, 1141 .fallocate = cifs_fallocate,
@@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
1067 .mmap = cifs_file_mmap, 1152 .mmap = cifs_file_mmap,
1068 .splice_read = generic_file_splice_read, 1153 .splice_read = generic_file_splice_read,
1069 .unlocked_ioctl = cifs_ioctl, 1154 .unlocked_ioctl = cifs_ioctl,
1155 .copy_file_range = cifs_copy_file_range,
1070 .clone_file_range = cifs_clone_file_range, 1156 .clone_file_range = cifs_clone_file_range,
1071 .llseek = cifs_llseek, 1157 .llseek = cifs_llseek,
1072 .setlease = cifs_setlease, 1158 .setlease = cifs_setlease,
@@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = {
1078 .release = cifs_closedir, 1164 .release = cifs_closedir,
1079 .read = generic_read_dir, 1165 .read = generic_read_dir,
1080 .unlocked_ioctl = cifs_ioctl, 1166 .unlocked_ioctl = cifs_ioctl,
1167 .copy_file_range = cifs_copy_file_range,
1081 .clone_file_range = cifs_clone_file_range, 1168 .clone_file_range = cifs_clone_file_range,
1082 .llseek = generic_file_llseek, 1169 .llseek = generic_file_llseek,
1083}; 1170};
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index da717fee3026..30bf89b1fd9a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
139# define cifs_listxattr NULL 139# define cifs_listxattr NULL
140#endif 140#endif
141 141
142extern ssize_t cifs_file_copychunk_range(unsigned int xid,
143 struct file *src_file, loff_t off,
144 struct file *dst_file, loff_t destoff,
145 size_t len, unsigned int flags);
146
142extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 147extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
143#ifdef CONFIG_CIFS_NFSD_EXPORT 148#ifdef CONFIG_CIFS_NFSD_EXPORT
144extern const struct export_operations cifs_export_ops; 149extern const struct export_operations cifs_export_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d42dd3288647..37f5a41cc50c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -243,6 +243,7 @@ struct smb_version_operations {
243 /* verify the message */ 243 /* verify the message */
244 int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); 244 int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
245 bool (*is_oplock_break)(char *, struct TCP_Server_Info *); 245 bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
246 int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
246 void (*downgrade_oplock)(struct TCP_Server_Info *, 247 void (*downgrade_oplock)(struct TCP_Server_Info *,
247 struct cifsInodeInfo *, bool); 248 struct cifsInodeInfo *, bool);
248 /* process transaction2 response */ 249 /* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
407 char * (*create_lease_buf)(u8 *, u8); 408 char * (*create_lease_buf)(u8 *, u8);
408 /* parse lease context buffer and return oplock/epoch info */ 409 /* parse lease context buffer and return oplock/epoch info */
409 __u8 (*parse_lease_buf)(void *, unsigned int *); 410 __u8 (*parse_lease_buf)(void *, unsigned int *);
410 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, 411 ssize_t (*copychunk_range)(const unsigned int,
411 struct cifsFileInfo *target_file, u64 src_off, u64 len, 412 struct cifsFileInfo *src_file,
412 u64 dest_off); 413 struct cifsFileInfo *target_file,
414 u64 src_off, u64 len, u64 dest_off);
413 int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src, 415 int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
414 struct cifsFileInfo *target_file, u64 src_off, u64 len, 416 struct cifsFileInfo *target_file, u64 src_off, u64 len,
415 u64 dest_off); 417 u64 dest_off);
@@ -946,7 +948,6 @@ struct cifs_tcon {
946 bool use_persistent:1; /* use persistent instead of durable handles */ 948 bool use_persistent:1; /* use persistent instead of durable handles */
947#ifdef CONFIG_CIFS_SMB2 949#ifdef CONFIG_CIFS_SMB2
948 bool print:1; /* set if connection to printer share */ 950 bool print:1; /* set if connection to printer share */
949 bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
950 __le32 capabilities; 951 __le32 capabilities;
951 __u32 share_flags; 952 __u32 share_flags;
952 __u32 maximal_access; 953 __u32 maximal_access;
@@ -1343,6 +1344,7 @@ struct mid_q_entry {
1343 void *callback_data; /* general purpose pointer for callback */ 1344 void *callback_data; /* general purpose pointer for callback */
1344 void *resp_buf; /* pointer to received SMB header */ 1345 void *resp_buf; /* pointer to received SMB header */
1345 int mid_state; /* wish this were enum but can not pass to wait_event */ 1346 int mid_state; /* wish this were enum but can not pass to wait_event */
1347 unsigned int mid_flags;
1346 __le16 command; /* smb command code */ 1348 __le16 command; /* smb command code */
1347 bool large_buf:1; /* if valid response, is pointer to large buf */ 1349 bool large_buf:1; /* if valid response, is pointer to large buf */
1348 bool multiRsp:1; /* multiple trans2 responses for one request */ 1350 bool multiRsp:1; /* multiple trans2 responses for one request */
@@ -1350,6 +1352,12 @@ struct mid_q_entry {
1350 bool decrypted:1; /* decrypted entry */ 1352 bool decrypted:1; /* decrypted entry */
1351}; 1353};
1352 1354
1355struct close_cancelled_open {
1356 struct cifs_fid fid;
1357 struct cifs_tcon *tcon;
1358 struct work_struct work;
1359};
1360
1353/* Make code in transport.c a little cleaner by moving 1361/* Make code in transport.c a little cleaner by moving
1354 update of optional stats into function below */ 1362 update of optional stats into function below */
1355#ifdef CONFIG_CIFS_STATS2 1363#ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
1481#define MID_RESPONSE_MALFORMED 0x10 1489#define MID_RESPONSE_MALFORMED 0x10
1482#define MID_SHUTDOWN 0x20 1490#define MID_SHUTDOWN 0x20
1483 1491
1492/* Flags */
1493#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
1494
1484/* Types of response buffer returned from SendReceive2 */ 1495/* Types of response buffer returned from SendReceive2 */
1485#define CIFS_NO_BUFFER 0 /* Response buffer not returned */ 1496#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
1486#define CIFS_SMALL_BUFFER 1 1497#define CIFS_SMALL_BUFFER 1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 066950671929..5d21f00ae341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1428 1428
1429 length = cifs_discard_remaining_data(server); 1429 length = cifs_discard_remaining_data(server);
1430 dequeue_mid(mid, rdata->result); 1430 dequeue_mid(mid, rdata->result);
1431 mid->resp_buf = server->smallbuf;
1432 server->smallbuf = NULL;
1431 return length; 1433 return length;
1432} 1434}
1433 1435
@@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1541 return cifs_readv_discard(server, mid); 1543 return cifs_readv_discard(server, mid);
1542 1544
1543 dequeue_mid(mid, false); 1545 dequeue_mid(mid, false);
1546 mid->resp_buf = server->smallbuf;
1547 server->smallbuf = NULL;
1544 return length; 1548 return length;
1545} 1549}
1546 1550
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9ae695ae3ed7..d82467cfb0e2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p)
904 904
905 server->lstrp = jiffies; 905 server->lstrp = jiffies;
906 if (mid_entry != NULL) { 906 if (mid_entry != NULL) {
907 if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
908 mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
909 server->ops->handle_cancelled_mid)
910 server->ops->handle_cancelled_mid(
911 mid_entry->resp_buf,
912 server);
913
907 if (!mid_entry->multiRsp || mid_entry->multiEnd) 914 if (!mid_entry->multiRsp || mid_entry->multiEnd)
908 mid_entry->callback(mid_entry); 915 mid_entry->callback(mid_entry);
909 } else if (!server->ops->is_oplock_break || 916 } else if (server->ops->is_oplock_break &&
910 !server->ops->is_oplock_break(buf, server)) { 917 server->ops->is_oplock_break(buf, server)) {
918 cifs_dbg(FYI, "Received oplock break\n");
919 } else {
911 cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n", 920 cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
912 atomic_read(&midCount)); 921 atomic_read(&midCount));
913 cifs_dump_mem("Received Data is: ", buf, 922 cifs_dump_mem("Received Data is: ", buf,
@@ -3744,6 +3753,9 @@ try_mount_again:
3744 if (IS_ERR(tcon)) { 3753 if (IS_ERR(tcon)) {
3745 rc = PTR_ERR(tcon); 3754 rc = PTR_ERR(tcon);
3746 tcon = NULL; 3755 tcon = NULL;
3756 if (rc == -EACCES)
3757 goto mount_fail_check;
3758
3747 goto remote_path_check; 3759 goto remote_path_check;
3748 } 3760 }
3749 3761
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index aa3debbba826..21d404535739 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2597 wdata->credits = credits; 2597 wdata->credits = credits;
2598 2598
2599 if (!wdata->cfile->invalidHandle || 2599 if (!wdata->cfile->invalidHandle ||
2600 !cifs_reopen_file(wdata->cfile, false)) 2600 !(rc = cifs_reopen_file(wdata->cfile, false)))
2601 rc = server->ops->async_writev(wdata, 2601 rc = server->ops->async_writev(wdata,
2602 cifs_uncached_writedata_release); 2602 cifs_uncached_writedata_release);
2603 if (rc) { 2603 if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3022 rdata->credits = credits; 3022 rdata->credits = credits;
3023 3023
3024 if (!rdata->cfile->invalidHandle || 3024 if (!rdata->cfile->invalidHandle ||
3025 !cifs_reopen_file(rdata->cfile, true)) 3025 !(rc = cifs_reopen_file(rdata->cfile, true)))
3026 rc = server->ops->async_readv(rdata); 3026 rc = server->ops->async_readv(rdata);
3027error: 3027error:
3028 if (rc) { 3028 if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3617 } 3617 }
3618 3618
3619 if (!rdata->cfile->invalidHandle || 3619 if (!rdata->cfile->invalidHandle ||
3620 !cifs_reopen_file(rdata->cfile, true)) 3620 !(rc = cifs_reopen_file(rdata->cfile, true)))
3621 rc = server->ops->async_readv(rdata); 3621 rc = server->ops->async_readv(rdata);
3622 if (rc) { 3622 if (rc) {
3623 add_credits_and_wake_if(server, rdata->credits, 0); 3623 add_credits_and_wake_if(server, rdata->credits, 0);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..265c45fe4ea5 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,71 +34,14 @@
34#include "cifs_ioctl.h" 34#include "cifs_ioctl.h"
35#include <linux/btrfs.h> 35#include <linux/btrfs.h>
36 36
37static int cifs_file_clone_range(unsigned int xid, struct file *src_file, 37static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
38 struct file *dst_file)
39{
40 struct inode *src_inode = file_inode(src_file);
41 struct inode *target_inode = file_inode(dst_file);
42 struct cifsFileInfo *smb_file_src;
43 struct cifsFileInfo *smb_file_target;
44 struct cifs_tcon *src_tcon;
45 struct cifs_tcon *target_tcon;
46 int rc;
47
48 cifs_dbg(FYI, "ioctl clone range\n");
49
50 if (!src_file->private_data || !dst_file->private_data) {
51 rc = -EBADF;
52 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
53 goto out;
54 }
55
56 rc = -EXDEV;
57 smb_file_target = dst_file->private_data;
58 smb_file_src = src_file->private_data;
59 src_tcon = tlink_tcon(smb_file_src->tlink);
60 target_tcon = tlink_tcon(smb_file_target->tlink);
61
62 if (src_tcon->ses != target_tcon->ses) {
63 cifs_dbg(VFS, "source and target of copy not on same server\n");
64 goto out;
65 }
66
67 /*
68 * Note: cifs case is easier than btrfs since server responsible for
69 * checks for proper open modes and file type and if it wants
70 * server could even support copy of range where source = target
71 */
72 lock_two_nondirectories(target_inode, src_inode);
73
74 cifs_dbg(FYI, "about to flush pages\n");
75 /* should we flush first and last page first */
76 truncate_inode_pages(&target_inode->i_data, 0);
77
78 if (target_tcon->ses->server->ops->clone_range)
79 rc = target_tcon->ses->server->ops->clone_range(xid,
80 smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
81 else
82 rc = -EOPNOTSUPP;
83
84 /* force revalidate of size and timestamps of target file now
85 that target is updated on the server */
86 CIFS_I(target_inode)->time = 0;
87 /* although unlocking in the reverse order from locking is not
88 strictly necessary here it is a little cleaner to be consistent */
89 unlock_two_nondirectories(src_inode, target_inode);
90out:
91 return rc;
92}
93
94static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
95 unsigned long srcfd) 38 unsigned long srcfd)
96{ 39{
97 int rc; 40 int rc;
98 struct fd src_file; 41 struct fd src_file;
99 struct inode *src_inode; 42 struct inode *src_inode;
100 43
101 cifs_dbg(FYI, "ioctl clone range\n"); 44 cifs_dbg(FYI, "ioctl copychunk range\n");
102 /* the destination must be opened for writing */ 45 /* the destination must be opened for writing */
103 if (!(dst_file->f_mode & FMODE_WRITE)) { 46 if (!(dst_file->f_mode & FMODE_WRITE)) {
104 cifs_dbg(FYI, "file target not open for write\n"); 47 cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
129 if (S_ISDIR(src_inode->i_mode)) 72 if (S_ISDIR(src_inode->i_mode))
130 goto out_fput; 73 goto out_fput;
131 74
132 rc = cifs_file_clone_range(xid, src_file.file, dst_file); 75 rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
76 src_inode->i_size, 0);
133 77
134out_fput: 78out_fput:
135 fdput(src_file); 79 fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
251 } 195 }
252 break; 196 break;
253 case CIFS_IOC_COPYCHUNK_FILE: 197 case CIFS_IOC_COPYCHUNK_FILE:
254 rc = cifs_ioctl_clone(xid, filep, arg); 198 rc = cifs_ioctl_copychunk(xid, filep, arg);
255 break; 199 break;
256 case CIFS_IOC_SET_INTEGRITY: 200 case CIFS_IOC_SET_INTEGRITY:
257 if (pSMBFile == NULL) 201 if (pSMBFile == NULL)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index cc93ba4da9b5..27bc360c7ffd 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
1015 return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; 1015 return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
1016} 1016}
1017 1017
1018static bool
1019cifs_can_echo(struct TCP_Server_Info *server)
1020{
1021 if (server->tcpStatus == CifsGood)
1022 return true;
1023
1024 return false;
1025}
1026
1018struct smb_version_operations smb1_operations = { 1027struct smb_version_operations smb1_operations = {
1019 .send_cancel = send_nt_cancel, 1028 .send_cancel = send_nt_cancel,
1020 .compare_fids = cifs_compare_fids, 1029 .compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
1049 .get_dfs_refer = CIFSGetDFSRefer, 1058 .get_dfs_refer = CIFSGetDFSRefer,
1050 .qfs_tcon = cifs_qfs_tcon, 1059 .qfs_tcon = cifs_qfs_tcon,
1051 .is_path_accessible = cifs_is_path_accessible, 1060 .is_path_accessible = cifs_is_path_accessible,
1061 .can_echo = cifs_can_echo,
1052 .query_path_info = cifs_query_path_info, 1062 .query_path_info = cifs_query_path_info,
1053 .query_file_info = cifs_query_file_info, 1063 .query_file_info = cifs_query_file_info,
1054 .get_srv_inum = cifs_get_srv_inum, 1064 .get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index fd516ea8b8f8..1a04b3a5beb1 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
659 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); 659 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
660 return false; 660 return false;
661} 661}
662
663void
664smb2_cancelled_close_fid(struct work_struct *work)
665{
666 struct close_cancelled_open *cancelled = container_of(work,
667 struct close_cancelled_open, work);
668
669 cifs_dbg(VFS, "Close unmatched open\n");
670
671 SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
672 cancelled->fid.volatile_fid);
673 cifs_put_tcon(cancelled->tcon);
674 kfree(cancelled);
675}
676
677int
678smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
679{
680 struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
681 struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
682 struct cifs_tcon *tcon;
683 struct close_cancelled_open *cancelled;
684
685 if (sync_hdr->Command != SMB2_CREATE ||
686 sync_hdr->Status != STATUS_SUCCESS)
687 return 0;
688
689 cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
690 if (!cancelled)
691 return -ENOMEM;
692
693 tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
694 sync_hdr->TreeId);
695 if (!tcon) {
696 kfree(cancelled);
697 return -ENOENT;
698 }
699
700 cancelled->fid.persistent_fid = rsp->PersistentFileId;
701 cancelled->fid.volatile_fid = rsp->VolatileFileId;
702 cancelled->tcon = tcon;
703 INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
704 queue_work(cifsiod_wq, &cancelled->work);
705
706 return 0;
707}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0231108d9387..152e37f2ad92 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -21,6 +21,7 @@
21#include <linux/vfs.h> 21#include <linux/vfs.h>
22#include <linux/falloc.h> 22#include <linux/falloc.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/uuid.h>
24#include <crypto/aead.h> 25#include <crypto/aead.h>
25#include "cifsglob.h" 26#include "cifsglob.h"
26#include "smb2pdu.h" 27#include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
592 return rc; 593 return rc;
593} 594}
594 595
595static int 596static ssize_t
596smb2_clone_range(const unsigned int xid, 597smb2_copychunk_range(const unsigned int xid,
597 struct cifsFileInfo *srcfile, 598 struct cifsFileInfo *srcfile,
598 struct cifsFileInfo *trgtfile, u64 src_off, 599 struct cifsFileInfo *trgtfile, u64 src_off,
599 u64 len, u64 dest_off) 600 u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
605 struct cifs_tcon *tcon; 606 struct cifs_tcon *tcon;
606 int chunks_copied = 0; 607 int chunks_copied = 0;
607 bool chunk_sizes_updated = false; 608 bool chunk_sizes_updated = false;
609 ssize_t bytes_written, total_bytes_written = 0;
608 610
609 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); 611 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
610 612
611 if (pcchunk == NULL) 613 if (pcchunk == NULL)
612 return -ENOMEM; 614 return -ENOMEM;
613 615
614 cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n"); 616 cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
615 /* Request a key from the server to identify the source of the copy */ 617 /* Request a key from the server to identify the source of the copy */
616 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), 618 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
617 srcfile->fid.persistent_fid, 619 srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
669 } 671 }
670 chunks_copied++; 672 chunks_copied++;
671 673
672 src_off += le32_to_cpu(retbuf->TotalBytesWritten); 674 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
673 dest_off += le32_to_cpu(retbuf->TotalBytesWritten); 675 src_off += bytes_written;
674 len -= le32_to_cpu(retbuf->TotalBytesWritten); 676 dest_off += bytes_written;
677 len -= bytes_written;
678 total_bytes_written += bytes_written;
675 679
676 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n", 680 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
677 le32_to_cpu(retbuf->ChunksWritten), 681 le32_to_cpu(retbuf->ChunksWritten),
678 le32_to_cpu(retbuf->ChunkBytesWritten), 682 le32_to_cpu(retbuf->ChunkBytesWritten),
679 le32_to_cpu(retbuf->TotalBytesWritten)); 683 bytes_written);
680 } else if (rc == -EINVAL) { 684 } else if (rc == -EINVAL) {
681 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) 685 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
682 goto cchunk_out; 686 goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
713cchunk_out: 717cchunk_out:
714 kfree(pcchunk); 718 kfree(pcchunk);
715 kfree(retbuf); 719 kfree(retbuf);
716 return rc; 720 if (rc)
721 return rc;
722 else
723 return total_bytes_written;
717} 724}
718 725
719static int 726static int
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
2322 .clear_stats = smb2_clear_stats, 2329 .clear_stats = smb2_clear_stats,
2323 .print_stats = smb2_print_stats, 2330 .print_stats = smb2_print_stats,
2324 .is_oplock_break = smb2_is_valid_oplock_break, 2331 .is_oplock_break = smb2_is_valid_oplock_break,
2332 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2325 .downgrade_oplock = smb2_downgrade_oplock, 2333 .downgrade_oplock = smb2_downgrade_oplock,
2326 .need_neg = smb2_need_neg, 2334 .need_neg = smb2_need_neg,
2327 .negotiate = smb2_negotiate, 2335 .negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
2377 .set_oplock_level = smb2_set_oplock_level, 2385 .set_oplock_level = smb2_set_oplock_level,
2378 .create_lease_buf = smb2_create_lease_buf, 2386 .create_lease_buf = smb2_create_lease_buf,
2379 .parse_lease_buf = smb2_parse_lease_buf, 2387 .parse_lease_buf = smb2_parse_lease_buf,
2380 .clone_range = smb2_clone_range, 2388 .copychunk_range = smb2_copychunk_range,
2381 .wp_retry_size = smb2_wp_retry_size, 2389 .wp_retry_size = smb2_wp_retry_size,
2382 .dir_needs_close = smb2_dir_needs_close, 2390 .dir_needs_close = smb2_dir_needs_close,
2383 .get_dfs_refer = smb2_get_dfs_refer, 2391 .get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
2404 .clear_stats = smb2_clear_stats, 2412 .clear_stats = smb2_clear_stats,
2405 .print_stats = smb2_print_stats, 2413 .print_stats = smb2_print_stats,
2406 .is_oplock_break = smb2_is_valid_oplock_break, 2414 .is_oplock_break = smb2_is_valid_oplock_break,
2415 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2407 .downgrade_oplock = smb2_downgrade_oplock, 2416 .downgrade_oplock = smb2_downgrade_oplock,
2408 .need_neg = smb2_need_neg, 2417 .need_neg = smb2_need_neg,
2409 .negotiate = smb2_negotiate, 2418 .negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
2459 .set_oplock_level = smb21_set_oplock_level, 2468 .set_oplock_level = smb21_set_oplock_level,
2460 .create_lease_buf = smb2_create_lease_buf, 2469 .create_lease_buf = smb2_create_lease_buf,
2461 .parse_lease_buf = smb2_parse_lease_buf, 2470 .parse_lease_buf = smb2_parse_lease_buf,
2462 .clone_range = smb2_clone_range, 2471 .copychunk_range = smb2_copychunk_range,
2463 .wp_retry_size = smb2_wp_retry_size, 2472 .wp_retry_size = smb2_wp_retry_size,
2464 .dir_needs_close = smb2_dir_needs_close, 2473 .dir_needs_close = smb2_dir_needs_close,
2465 .enum_snapshots = smb3_enum_snapshots, 2474 .enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
2488 .print_stats = smb2_print_stats, 2497 .print_stats = smb2_print_stats,
2489 .dump_share_caps = smb2_dump_share_caps, 2498 .dump_share_caps = smb2_dump_share_caps,
2490 .is_oplock_break = smb2_is_valid_oplock_break, 2499 .is_oplock_break = smb2_is_valid_oplock_break,
2500 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2491 .downgrade_oplock = smb2_downgrade_oplock, 2501 .downgrade_oplock = smb2_downgrade_oplock,
2492 .need_neg = smb2_need_neg, 2502 .need_neg = smb2_need_neg,
2493 .negotiate = smb2_negotiate, 2503 .negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
2545 .set_oplock_level = smb3_set_oplock_level, 2555 .set_oplock_level = smb3_set_oplock_level,
2546 .create_lease_buf = smb3_create_lease_buf, 2556 .create_lease_buf = smb3_create_lease_buf,
2547 .parse_lease_buf = smb3_parse_lease_buf, 2557 .parse_lease_buf = smb3_parse_lease_buf,
2548 .clone_range = smb2_clone_range, 2558 .copychunk_range = smb2_copychunk_range,
2549 .duplicate_extents = smb2_duplicate_extents, 2559 .duplicate_extents = smb2_duplicate_extents,
2550 .validate_negotiate = smb3_validate_negotiate, 2560 .validate_negotiate = smb3_validate_negotiate,
2551 .wp_retry_size = smb2_wp_retry_size, 2561 .wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
2582 .print_stats = smb2_print_stats, 2592 .print_stats = smb2_print_stats,
2583 .dump_share_caps = smb2_dump_share_caps, 2593 .dump_share_caps = smb2_dump_share_caps,
2584 .is_oplock_break = smb2_is_valid_oplock_break, 2594 .is_oplock_break = smb2_is_valid_oplock_break,
2595 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2585 .downgrade_oplock = smb2_downgrade_oplock, 2596 .downgrade_oplock = smb2_downgrade_oplock,
2586 .need_neg = smb2_need_neg, 2597 .need_neg = smb2_need_neg,
2587 .negotiate = smb2_negotiate, 2598 .negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
2639 .set_oplock_level = smb3_set_oplock_level, 2650 .set_oplock_level = smb3_set_oplock_level,
2640 .create_lease_buf = smb3_create_lease_buf, 2651 .create_lease_buf = smb3_create_lease_buf,
2641 .parse_lease_buf = smb3_parse_lease_buf, 2652 .parse_lease_buf = smb3_parse_lease_buf,
2642 .clone_range = smb2_clone_range, 2653 .copychunk_range = smb2_copychunk_range,
2643 .duplicate_extents = smb2_duplicate_extents, 2654 .duplicate_extents = smb2_duplicate_extents,
2644/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ 2655/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
2645 .wp_retry_size = smb2_wp_retry_size, 2656 .wp_retry_size = smb2_wp_retry_size,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7446496850a3..02da648041fc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
562 * but for time being this is our only auth choice so doesn't matter. 562 * but for time being this is our only auth choice so doesn't matter.
563 * We just found a server which sets blob length to zero expecting raw. 563 * We just found a server which sets blob length to zero expecting raw.
564 */ 564 */
565 if (blob_length == 0) 565 if (blob_length == 0) {
566 cifs_dbg(FYI, "missing security blob on negprot\n"); 566 cifs_dbg(FYI, "missing security blob on negprot\n");
567 server->sec_ntlmssp = true;
568 }
567 569
568 rc = cifs_enable_signing(server, ses->sign); 570 rc = cifs_enable_signing(server, ses->sign);
569 if (rc) 571 if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1171 else 1173 else
1172 return -EIO; 1174 return -EIO;
1173 1175
1174 if (tcon && tcon->bad_network_name)
1175 return -ENOENT;
1176
1177 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 1176 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1178 if (unc_path == NULL) 1177 if (unc_path == NULL)
1179 return -ENOMEM; 1178 return -ENOMEM;
@@ -1185,6 +1184,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1185 return -EINVAL; 1184 return -EINVAL;
1186 } 1185 }
1187 1186
1187 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
1188 if (tcon)
1189 tcon->tid = 0;
1190
1188 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); 1191 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
1189 if (rc) { 1192 if (rc) {
1190 kfree(unc_path); 1193 kfree(unc_path);
@@ -1273,8 +1276,6 @@ tcon_exit:
1273tcon_error_exit: 1276tcon_error_exit:
1274 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { 1277 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
1275 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 1278 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
1276 if (tcon)
1277 tcon->bad_network_name = true;
1278 } 1279 }
1279 goto tcon_exit; 1280 goto tcon_exit;
1280} 1281}
@@ -2177,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
2177 struct cifs_tcon *tcon, *tcon2; 2178 struct cifs_tcon *tcon, *tcon2;
2178 struct list_head tmp_list; 2179 struct list_head tmp_list;
2179 int tcon_exist = false; 2180 int tcon_exist = false;
2181 int rc;
2182 int resched = false;
2183
2180 2184
2181 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 2185 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
2182 mutex_lock(&server->reconnect_mutex); 2186 mutex_lock(&server->reconnect_mutex);
@@ -2204,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
2204 spin_unlock(&cifs_tcp_ses_lock); 2208 spin_unlock(&cifs_tcp_ses_lock);
2205 2209
2206 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 2210 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
2207 if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) 2211 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
2212 if (!rc)
2208 cifs_reopen_persistent_handles(tcon); 2213 cifs_reopen_persistent_handles(tcon);
2214 else
2215 resched = true;
2209 list_del_init(&tcon->rlist); 2216 list_del_init(&tcon->rlist);
2210 cifs_put_tcon(tcon); 2217 cifs_put_tcon(tcon);
2211 } 2218 }
2212 2219
2213 cifs_dbg(FYI, "Reconnecting tcons finished\n"); 2220 cifs_dbg(FYI, "Reconnecting tcons finished\n");
2221 if (resched)
2222 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
2214 mutex_unlock(&server->reconnect_mutex); 2223 mutex_unlock(&server->reconnect_mutex);
2215 2224
2216 /* now we can safely release srv struct */ 2225 /* now we can safely release srv struct */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 69e35873b1de..6853454fc871 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
48 struct smb_rqst *rqst); 48 struct smb_rqst *rqst);
49extern struct mid_q_entry *smb2_setup_async_request( 49extern struct mid_q_entry *smb2_setup_async_request(
50 struct TCP_Server_Info *server, struct smb_rqst *rqst); 50 struct TCP_Server_Info *server, struct smb_rqst *rqst);
51extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
52 __u64 ses_id);
53extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
54 __u64 ses_id, __u32 tid);
51extern int smb2_calc_signature(struct smb_rqst *rqst, 55extern int smb2_calc_signature(struct smb_rqst *rqst,
52 struct TCP_Server_Info *server); 56 struct TCP_Server_Info *server);
53extern int smb3_calc_signature(struct smb_rqst *rqst, 57extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
164extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, 168extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
165 const u64 persistent_fid, const u64 volatile_fid, 169 const u64 persistent_fid, const u64 volatile_fid,
166 const __u8 oplock_level); 170 const __u8 oplock_level);
171extern int smb2_handle_cancelled_mid(char *buffer,
172 struct TCP_Server_Info *server);
173void smb2_cancelled_close_fid(struct work_struct *work);
167extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, 174extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
168 u64 persistent_file_id, u64 volatile_file_id, 175 u64 persistent_file_id, u64 volatile_file_id,
169 struct kstatfs *FSData); 176 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7c3bb1bd7eed..506b67fc93d9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
115 return 0; 115 return 0;
116} 116}
117 117
118struct cifs_ses * 118static struct cifs_ses *
119smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) 119smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
120{ 120{
121 struct cifs_ses *ses; 121 struct cifs_ses *ses;
122 122
123 spin_lock(&cifs_tcp_ses_lock);
124 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 123 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
125 if (ses->Suid != ses_id) 124 if (ses->Suid != ses_id)
126 continue; 125 continue;
127 spin_unlock(&cifs_tcp_ses_lock);
128 return ses; 126 return ses;
129 } 127 }
128
129 return NULL;
130}
131
132struct cifs_ses *
133smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
134{
135 struct cifs_ses *ses;
136
137 spin_lock(&cifs_tcp_ses_lock);
138 ses = smb2_find_smb_ses_unlocked(server, ses_id);
130 spin_unlock(&cifs_tcp_ses_lock); 139 spin_unlock(&cifs_tcp_ses_lock);
131 140
141 return ses;
142}
143
144static struct cifs_tcon *
145smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
146{
147 struct cifs_tcon *tcon;
148
149 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
150 if (tcon->tid != tid)
151 continue;
152 ++tcon->tc_count;
153 return tcon;
154 }
155
132 return NULL; 156 return NULL;
133} 157}
134 158
159/*
160 * Obtain tcon corresponding to the tid in the given
161 * cifs_ses
162 */
163
164struct cifs_tcon *
165smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
166{
167 struct cifs_ses *ses;
168 struct cifs_tcon *tcon;
169
170 spin_lock(&cifs_tcp_ses_lock);
171 ses = smb2_find_smb_ses_unlocked(server, ses_id);
172 if (!ses) {
173 spin_unlock(&cifs_tcp_ses_lock);
174 return NULL;
175 }
176 tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
177 spin_unlock(&cifs_tcp_ses_lock);
178
179 return tcon;
180}
181
135int 182int
136smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) 183smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
137{ 184{
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 526f0533cb4e..f6e13a977fc8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
752 752
753 rc = wait_for_response(ses->server, midQ); 753 rc = wait_for_response(ses->server, midQ);
754 if (rc != 0) { 754 if (rc != 0) {
755 cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
755 send_cancel(ses->server, rqst, midQ); 756 send_cancel(ses->server, rqst, midQ);
756 spin_lock(&GlobalMid_Lock); 757 spin_lock(&GlobalMid_Lock);
757 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 758 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
759 midQ->mid_flags |= MID_WAIT_CANCELLED;
758 midQ->callback = DeleteMidQEntry; 760 midQ->callback = DeleteMidQEntry;
759 spin_unlock(&GlobalMid_Lock); 761 spin_unlock(&GlobalMid_Lock);
760 add_credits(ses->server, 1, optype); 762 add_credits(ses->server, 1, optype);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 02a7a9286449..6d6eca394d4d 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page);
327static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) 327static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
328{ 328{
329 struct dentry *dir; 329 struct dentry *dir;
330 struct fscrypt_info *ci;
331 int dir_has_key, cached_with_key; 330 int dir_has_key, cached_with_key;
332 331
333 if (flags & LOOKUP_RCU) 332 if (flags & LOOKUP_RCU)
@@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
339 return 0; 338 return 0;
340 } 339 }
341 340
342 ci = d_inode(dir)->i_crypt_info;
343 if (ci && ci->ci_keyring_key &&
344 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
345 (1 << KEY_FLAG_REVOKED) |
346 (1 << KEY_FLAG_DEAD))))
347 ci = NULL;
348
349 /* this should eventually be an flag in d_flags */ 341 /* this should eventually be an flag in d_flags */
350 spin_lock(&dentry->d_lock); 342 spin_lock(&dentry->d_lock);
351 cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; 343 cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
352 spin_unlock(&dentry->d_lock); 344 spin_unlock(&dentry->d_lock);
353 dir_has_key = (ci != NULL); 345 dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
354 dput(dir); 346 dput(dir);
355 347
356 /* 348 /*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 13052b85c393..37b49894c762 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
350 fname->disk_name.len = iname->len; 350 fname->disk_name.len = iname->len;
351 return 0; 351 return 0;
352 } 352 }
353 ret = fscrypt_get_crypt_info(dir); 353 ret = fscrypt_get_encryption_info(dir);
354 if (ret && ret != -EOPNOTSUPP) 354 if (ret && ret != -EOPNOTSUPP)
355 return ret; 355 return ret;
356 356
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index fdbb8af32eaf..e39696e64494 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -67,7 +67,6 @@ struct fscrypt_info {
67 u8 ci_filename_mode; 67 u8 ci_filename_mode;
68 u8 ci_flags; 68 u8 ci_flags;
69 struct crypto_skcipher *ci_ctfm; 69 struct crypto_skcipher *ci_ctfm;
70 struct key *ci_keyring_key;
71 u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; 70 u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
72}; 71};
73 72
@@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
101extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, 100extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
102 gfp_t gfp_flags); 101 gfp_t gfp_flags);
103 102
104/* keyinfo.c */
105extern int fscrypt_get_crypt_info(struct inode *);
106
107#endif /* _FSCRYPT_PRIVATE_H */ 103#endif /* _FSCRYPT_PRIVATE_H */
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index d5d896fa5a71..8cdfddce2b34 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
95 kfree(description); 95 kfree(description);
96 if (IS_ERR(keyring_key)) 96 if (IS_ERR(keyring_key))
97 return PTR_ERR(keyring_key); 97 return PTR_ERR(keyring_key);
98 down_read(&keyring_key->sem);
98 99
99 if (keyring_key->type != &key_type_logon) { 100 if (keyring_key->type != &key_type_logon) {
100 printk_once(KERN_WARNING 101 printk_once(KERN_WARNING
@@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
102 res = -ENOKEY; 103 res = -ENOKEY;
103 goto out; 104 goto out;
104 } 105 }
105 down_read(&keyring_key->sem);
106 ukp = user_key_payload_locked(keyring_key); 106 ukp = user_key_payload_locked(keyring_key);
107 if (ukp->datalen != sizeof(struct fscrypt_key)) { 107 if (ukp->datalen != sizeof(struct fscrypt_key)) {
108 res = -EINVAL; 108 res = -EINVAL;
109 up_read(&keyring_key->sem);
110 goto out; 109 goto out;
111 } 110 }
112 master_key = (struct fscrypt_key *)ukp->data; 111 master_key = (struct fscrypt_key *)ukp->data;
@@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
117 "%s: key size incorrect: %d\n", 116 "%s: key size incorrect: %d\n",
118 __func__, master_key->size); 117 __func__, master_key->size);
119 res = -ENOKEY; 118 res = -ENOKEY;
120 up_read(&keyring_key->sem);
121 goto out; 119 goto out;
122 } 120 }
123 res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); 121 res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
124 up_read(&keyring_key->sem);
125 if (res)
126 goto out;
127
128 crypt_info->ci_keyring_key = keyring_key;
129 return 0;
130out: 122out:
123 up_read(&keyring_key->sem);
131 key_put(keyring_key); 124 key_put(keyring_key);
132 return res; 125 return res;
133} 126}
@@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci)
169 if (!ci) 162 if (!ci)
170 return; 163 return;
171 164
172 key_put(ci->ci_keyring_key);
173 crypto_free_skcipher(ci->ci_ctfm); 165 crypto_free_skcipher(ci->ci_ctfm);
174 kmem_cache_free(fscrypt_info_cachep, ci); 166 kmem_cache_free(fscrypt_info_cachep, ci);
175} 167}
176 168
177int fscrypt_get_crypt_info(struct inode *inode) 169int fscrypt_get_encryption_info(struct inode *inode)
178{ 170{
179 struct fscrypt_info *crypt_info; 171 struct fscrypt_info *crypt_info;
180 struct fscrypt_context ctx; 172 struct fscrypt_context ctx;
@@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode)
184 u8 *raw_key = NULL; 176 u8 *raw_key = NULL;
185 int res; 177 int res;
186 178
179 if (inode->i_crypt_info)
180 return 0;
181
187 res = fscrypt_initialize(inode->i_sb->s_cop->flags); 182 res = fscrypt_initialize(inode->i_sb->s_cop->flags);
188 if (res) 183 if (res)
189 return res; 184 return res;
190 185
191 if (!inode->i_sb->s_cop->get_context) 186 if (!inode->i_sb->s_cop->get_context)
192 return -EOPNOTSUPP; 187 return -EOPNOTSUPP;
193retry:
194 crypt_info = ACCESS_ONCE(inode->i_crypt_info);
195 if (crypt_info) {
196 if (!crypt_info->ci_keyring_key ||
197 key_validate(crypt_info->ci_keyring_key) == 0)
198 return 0;
199 fscrypt_put_encryption_info(inode, crypt_info);
200 goto retry;
201 }
202 188
203 res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); 189 res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
204 if (res < 0) { 190 if (res < 0) {
@@ -229,7 +215,6 @@ retry:
229 crypt_info->ci_data_mode = ctx.contents_encryption_mode; 215 crypt_info->ci_data_mode = ctx.contents_encryption_mode;
230 crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; 216 crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
231 crypt_info->ci_ctfm = NULL; 217 crypt_info->ci_ctfm = NULL;
232 crypt_info->ci_keyring_key = NULL;
233 memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, 218 memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
234 sizeof(crypt_info->ci_master_key)); 219 sizeof(crypt_info->ci_master_key));
235 220
@@ -273,14 +258,8 @@ retry:
273 if (res) 258 if (res)
274 goto out; 259 goto out;
275 260
276 kzfree(raw_key); 261 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
277 raw_key = NULL; 262 crypt_info = NULL;
278 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
279 put_crypt_info(crypt_info);
280 goto retry;
281 }
282 return 0;
283
284out: 263out:
285 if (res == -ENOKEY) 264 if (res == -ENOKEY)
286 res = 0; 265 res = 0;
@@ -288,6 +267,7 @@ out:
288 kzfree(raw_key); 267 kzfree(raw_key);
289 return res; 268 return res;
290} 269}
270EXPORT_SYMBOL(fscrypt_get_encryption_info);
291 271
292void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) 272void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
293{ 273{
@@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
305 put_crypt_info(ci); 285 put_crypt_info(ci);
306} 286}
307EXPORT_SYMBOL(fscrypt_put_encryption_info); 287EXPORT_SYMBOL(fscrypt_put_encryption_info);
308
309int fscrypt_get_encryption_info(struct inode *inode)
310{
311 struct fscrypt_info *ci = inode->i_crypt_info;
312
313 if (!ci ||
314 (ci->ci_keyring_key &&
315 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
316 (1 << KEY_FLAG_REVOKED) |
317 (1 << KEY_FLAG_DEAD)))))
318 return fscrypt_get_crypt_info(inode);
319 return 0;
320}
321EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 14b76da71269..4908906d54d5 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode,
33 const struct fscrypt_policy *policy) 33 const struct fscrypt_policy *policy)
34{ 34{
35 struct fscrypt_context ctx; 35 struct fscrypt_context ctx;
36 int res;
37 36
38 if (!inode->i_sb->s_cop->set_context) 37 if (!inode->i_sb->s_cop->set_context)
39 return -EOPNOTSUPP; 38 return -EOPNOTSUPP;
40 39
41 if (inode->i_sb->s_cop->prepare_context) {
42 res = inode->i_sb->s_cop->prepare_context(inode);
43 if (res)
44 return res;
45 }
46
47 ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; 40 ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
48 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, 41 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
49 FS_KEY_DESCRIPTOR_SIZE); 42 FS_KEY_DESCRIPTOR_SIZE);
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
373 } 373 }
374 spin_lock_irq(&mapping->tree_lock); 374 spin_lock_irq(&mapping->tree_lock);
375 375
376 if (!entry) {
377 /*
378 * We needed to drop the page_tree lock while calling
379 * radix_tree_preload() and we didn't have an entry to
380 * lock. See if another thread inserted an entry at
381 * our index during this time.
382 */
383 entry = __radix_tree_lookup(&mapping->page_tree, index,
384 NULL, &slot);
385 if (entry) {
386 radix_tree_preload_end();
387 spin_unlock_irq(&mapping->tree_lock);
388 goto restart;
389 }
390 }
391
376 if (pmd_downgrade) { 392 if (pmd_downgrade) {
377 radix_tree_delete(&mapping->page_tree, index); 393 radix_tree_delete(&mapping->page_tree, index);
378 mapping->nrexceptional--; 394 mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
388 if (err) { 404 if (err) {
389 spin_unlock_irq(&mapping->tree_lock); 405 spin_unlock_irq(&mapping->tree_lock);
390 /* 406 /*
391 * Someone already created the entry? This is a 407 * Our insertion of a DAX entry failed, most likely
392 * normal failure when inserting PMDs in a range 408 * because we were inserting a PMD entry and it
393 * that already contains PTEs. In that case we want 409 * collided with a PTE sized entry at a different
394 * to return -EEXIST immediately. 410 * index in the PMD range. We haven't inserted
395 */ 411 * anything into the radix tree and have no waiters to
396 if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD)) 412 * wake.
397 goto restart;
398 /*
399 * Our insertion of a DAX PMD entry failed, most
400 * likely because it collided with a PTE sized entry
401 * at a different index in the PMD range. We haven't
402 * inserted anything into the radix tree and have no
403 * waiters to wake.
404 */ 413 */
405 return ERR_PTR(err); 414 return ERR_PTR(err);
406 } 415 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f493af666591..fb69ee2388db 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *);
2466extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int); 2466extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
2467extern void ext4_evict_inode(struct inode *); 2467extern void ext4_evict_inode(struct inode *);
2468extern void ext4_clear_inode(struct inode *); 2468extern void ext4_clear_inode(struct inode *);
2469extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
2469extern int ext4_sync_inode(handle_t *, struct inode *); 2470extern int ext4_sync_inode(handle_t *, struct inode *);
2470extern void ext4_dirty_inode(struct inode *, int); 2471extern void ext4_dirty_inode(struct inode *, int);
2471extern int ext4_change_inode_journal_flag(struct inode *, int); 2472extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8210c1f43556..cefa9835f275 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
744 744
745const struct inode_operations ext4_file_inode_operations = { 745const struct inode_operations ext4_file_inode_operations = {
746 .setattr = ext4_setattr, 746 .setattr = ext4_setattr,
747 .getattr = ext4_getattr, 747 .getattr = ext4_file_getattr,
748 .listxattr = ext4_listxattr, 748 .listxattr = ext4_listxattr,
749 .get_acl = ext4_get_acl, 749 .get_acl = ext4_get_acl,
750 .set_acl = ext4_set_acl, 750 .set_acl = ext4_set_acl,
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 30a9f210d1e3..375fb1c05d49 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
1169 set_buffer_uptodate(dir_block); 1169 set_buffer_uptodate(dir_block);
1170 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); 1170 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
1171 if (err) 1171 if (err)
1172 goto out; 1172 return err;
1173 set_buffer_verified(dir_block); 1173 set_buffer_verified(dir_block);
1174out: 1174 return ext4_mark_inode_dirty(handle, inode);
1175 return err;
1176} 1175}
1177 1176
1178static int ext4_convert_inline_data_nolock(handle_t *handle, 1177static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7385e6a6b6cb..b9ffa9f4191f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5390,17 +5390,52 @@ err_out:
5390int ext4_getattr(const struct path *path, struct kstat *stat, 5390int ext4_getattr(const struct path *path, struct kstat *stat,
5391 u32 request_mask, unsigned int query_flags) 5391 u32 request_mask, unsigned int query_flags)
5392{ 5392{
5393 struct inode *inode; 5393 struct inode *inode = d_inode(path->dentry);
5394 unsigned long long delalloc_blocks; 5394 struct ext4_inode *raw_inode;
5395 struct ext4_inode_info *ei = EXT4_I(inode);
5396 unsigned int flags;
5397
5398 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5399 stat->result_mask |= STATX_BTIME;
5400 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5401 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5402 }
5403
5404 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5405 if (flags & EXT4_APPEND_FL)
5406 stat->attributes |= STATX_ATTR_APPEND;
5407 if (flags & EXT4_COMPR_FL)
5408 stat->attributes |= STATX_ATTR_COMPRESSED;
5409 if (flags & EXT4_ENCRYPT_FL)
5410 stat->attributes |= STATX_ATTR_ENCRYPTED;
5411 if (flags & EXT4_IMMUTABLE_FL)
5412 stat->attributes |= STATX_ATTR_IMMUTABLE;
5413 if (flags & EXT4_NODUMP_FL)
5414 stat->attributes |= STATX_ATTR_NODUMP;
5415
5416 stat->attributes_mask |= (STATX_ATTR_APPEND |
5417 STATX_ATTR_COMPRESSED |
5418 STATX_ATTR_ENCRYPTED |
5419 STATX_ATTR_IMMUTABLE |
5420 STATX_ATTR_NODUMP);
5395 5421
5396 inode = d_inode(path->dentry);
5397 generic_fillattr(inode, stat); 5422 generic_fillattr(inode, stat);
5423 return 0;
5424}
5425
5426int ext4_file_getattr(const struct path *path, struct kstat *stat,
5427 u32 request_mask, unsigned int query_flags)
5428{
5429 struct inode *inode = d_inode(path->dentry);
5430 u64 delalloc_blocks;
5431
5432 ext4_getattr(path, stat, request_mask, query_flags);
5398 5433
5399 /* 5434 /*
5400 * If there is inline data in the inode, the inode will normally not 5435 * If there is inline data in the inode, the inode will normally not
5401 * have data blocks allocated (it may have an external xattr block). 5436 * have data blocks allocated (it may have an external xattr block).
5402 * Report at least one sector for such files, so tools like tar, rsync, 5437 * Report at least one sector for such files, so tools like tar, rsync,
5403 * others doen't incorrectly think the file is completely sparse. 5438 * others don't incorrectly think the file is completely sparse.
5404 */ 5439 */
5405 if (unlikely(ext4_has_inline_data(inode))) 5440 if (unlikely(ext4_has_inline_data(inode)))
5406 stat->blocks += (stat->size + 511) >> 9; 5441 stat->blocks += (stat->size + 511) >> 9;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 578f8c33fb44..c992ef2c2f94 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode,
511 if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != 511 if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
512 (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { 512 (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
513 ext4_debug("ext4 move extent: orig and donor's start " 513 ext4_debug("ext4 move extent: orig and donor's start "
514 "offset are not alligned [ino:orig %lu, donor %lu]\n", 514 "offsets are not aligned [ino:orig %lu, donor %lu]\n",
515 orig_inode->i_ino, donor_inode->i_ino); 515 orig_inode->i_ino, donor_inode->i_ino);
516 return -EINVAL; 516 return -EINVAL;
517 } 517 }
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6ad612c576fc..07e5e1405771 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
3912 .tmpfile = ext4_tmpfile, 3912 .tmpfile = ext4_tmpfile,
3913 .rename = ext4_rename2, 3913 .rename = ext4_rename2,
3914 .setattr = ext4_setattr, 3914 .setattr = ext4_setattr,
3915 .getattr = ext4_getattr,
3915 .listxattr = ext4_listxattr, 3916 .listxattr = ext4_listxattr,
3916 .get_acl = ext4_get_acl, 3917 .get_acl = ext4_get_acl,
3917 .set_acl = ext4_set_acl, 3918 .set_acl = ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
3920 3921
3921const struct inode_operations ext4_special_inode_operations = { 3922const struct inode_operations ext4_special_inode_operations = {
3922 .setattr = ext4_setattr, 3923 .setattr = ext4_setattr,
3924 .getattr = ext4_getattr,
3923 .listxattr = ext4_listxattr, 3925 .listxattr = ext4_listxattr,
3924 .get_acl = ext4_get_acl, 3926 .get_acl = ext4_get_acl,
3925 .set_acl = ext4_set_acl, 3927 .set_acl = ext4_set_acl,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2e03a0a88d92..a9448db1cf7e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
1120 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); 1120 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
1121} 1121}
1122 1122
1123static int ext4_prepare_context(struct inode *inode)
1124{
1125 return ext4_convert_inline_data(inode);
1126}
1127
1128static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, 1123static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
1129 void *fs_data) 1124 void *fs_data)
1130{ 1125{
1131 handle_t *handle = fs_data; 1126 handle_t *handle = fs_data;
1132 int res, res2, retries = 0; 1127 int res, res2, retries = 0;
1133 1128
1129 res = ext4_convert_inline_data(inode);
1130 if (res)
1131 return res;
1132
1134 /* 1133 /*
1135 * If a journal handle was specified, then the encryption context is 1134 * If a journal handle was specified, then the encryption context is
1136 * being set on a new inode via inheritance and is part of a larger 1135 * being set on a new inode via inheritance and is part of a larger
@@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
1196static const struct fscrypt_operations ext4_cryptops = { 1195static const struct fscrypt_operations ext4_cryptops = {
1197 .key_prefix = "ext4:", 1196 .key_prefix = "ext4:",
1198 .get_context = ext4_get_context, 1197 .get_context = ext4_get_context,
1199 .prepare_context = ext4_prepare_context,
1200 .set_context = ext4_set_context, 1198 .set_context = ext4_set_context,
1201 .dummy_context = ext4_dummy_context, 1199 .dummy_context = ext4_dummy_context,
1202 .is_encrypted = ext4_encrypted_inode, 1200 .is_encrypted = ext4_encrypted_inode,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 73b184d161fc..5c8fc53cb0e5 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -85,17 +85,20 @@ errout:
85const struct inode_operations ext4_encrypted_symlink_inode_operations = { 85const struct inode_operations ext4_encrypted_symlink_inode_operations = {
86 .get_link = ext4_encrypted_get_link, 86 .get_link = ext4_encrypted_get_link,
87 .setattr = ext4_setattr, 87 .setattr = ext4_setattr,
88 .getattr = ext4_getattr,
88 .listxattr = ext4_listxattr, 89 .listxattr = ext4_listxattr,
89}; 90};
90 91
91const struct inode_operations ext4_symlink_inode_operations = { 92const struct inode_operations ext4_symlink_inode_operations = {
92 .get_link = page_get_link, 93 .get_link = page_get_link,
93 .setattr = ext4_setattr, 94 .setattr = ext4_setattr,
95 .getattr = ext4_getattr,
94 .listxattr = ext4_listxattr, 96 .listxattr = ext4_listxattr,
95}; 97};
96 98
97const struct inode_operations ext4_fast_symlink_inode_operations = { 99const struct inode_operations ext4_fast_symlink_inode_operations = {
98 .get_link = simple_get_link, 100 .get_link = simple_get_link,
99 .setattr = ext4_setattr, 101 .setattr = ext4_setattr,
102 .getattr = ext4_getattr,
100 .listxattr = ext4_listxattr, 103 .listxattr = ext4_listxattr,
101}; 104};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 67636acf7624..996e7900d4c8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
131} 131}
132 132
133static int ext4_xattr_block_csum_verify(struct inode *inode, 133static int ext4_xattr_block_csum_verify(struct inode *inode,
134 sector_t block_nr, 134 struct buffer_head *bh)
135 struct ext4_xattr_header *hdr)
136{ 135{
137 if (ext4_has_metadata_csum(inode->i_sb) && 136 struct ext4_xattr_header *hdr = BHDR(bh);
138 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) 137 int ret = 1;
139 return 0;
140 return 1;
141}
142
143static void ext4_xattr_block_csum_set(struct inode *inode,
144 sector_t block_nr,
145 struct ext4_xattr_header *hdr)
146{
147 if (!ext4_has_metadata_csum(inode->i_sb))
148 return;
149 138
150 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); 139 if (ext4_has_metadata_csum(inode->i_sb)) {
140 lock_buffer(bh);
141 ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
142 bh->b_blocknr, hdr));
143 unlock_buffer(bh);
144 }
145 return ret;
151} 146}
152 147
153static inline int ext4_handle_dirty_xattr_block(handle_t *handle, 148static void ext4_xattr_block_csum_set(struct inode *inode,
154 struct inode *inode, 149 struct buffer_head *bh)
155 struct buffer_head *bh)
156{ 150{
157 ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); 151 if (ext4_has_metadata_csum(inode->i_sb))
158 return ext4_handle_dirty_metadata(handle, inode, bh); 152 BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
153 bh->b_blocknr, BHDR(bh));
159} 154}
160 155
161static inline const struct xattr_handler * 156static inline const struct xattr_handler *
@@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
233 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || 228 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
234 BHDR(bh)->h_blocks != cpu_to_le32(1)) 229 BHDR(bh)->h_blocks != cpu_to_le32(1))
235 return -EFSCORRUPTED; 230 return -EFSCORRUPTED;
236 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) 231 if (!ext4_xattr_block_csum_verify(inode, bh))
237 return -EFSBADCRC; 232 return -EFSBADCRC;
238 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, 233 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
239 bh->b_data); 234 bh->b_data);
@@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
618 } 613 }
619 } 614 }
620 615
616 ext4_xattr_block_csum_set(inode, bh);
621 /* 617 /*
622 * Beware of this ugliness: Releasing of xattr block references 618 * Beware of this ugliness: Releasing of xattr block references
623 * from different inodes can race and so we have to protect 619 * from different inodes can race and so we have to protect
624 * from a race where someone else frees the block (and releases 620 * from a race where someone else frees the block (and releases
625 * its journal_head) before we are done dirtying the buffer. In 621 * its journal_head) before we are done dirtying the buffer. In
626 * nojournal mode this race is harmless and we actually cannot 622 * nojournal mode this race is harmless and we actually cannot
627 * call ext4_handle_dirty_xattr_block() with locked buffer as 623 * call ext4_handle_dirty_metadata() with locked buffer as
628 * that function can call sync_dirty_buffer() so for that case 624 * that function can call sync_dirty_buffer() so for that case
629 * we handle the dirtying after unlocking the buffer. 625 * we handle the dirtying after unlocking the buffer.
630 */ 626 */
631 if (ext4_handle_valid(handle)) 627 if (ext4_handle_valid(handle))
632 error = ext4_handle_dirty_xattr_block(handle, inode, 628 error = ext4_handle_dirty_metadata(handle, inode, bh);
633 bh);
634 unlock_buffer(bh); 629 unlock_buffer(bh);
635 if (!ext4_handle_valid(handle)) 630 if (!ext4_handle_valid(handle))
636 error = ext4_handle_dirty_xattr_block(handle, inode, 631 error = ext4_handle_dirty_metadata(handle, inode, bh);
637 bh);
638 if (IS_SYNC(inode)) 632 if (IS_SYNC(inode))
639 ext4_handle_sync(handle); 633 ext4_handle_sync(handle);
640 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); 634 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
863 ext4_xattr_cache_insert(ext4_mb_cache, 857 ext4_xattr_cache_insert(ext4_mb_cache,
864 bs->bh); 858 bs->bh);
865 } 859 }
860 ext4_xattr_block_csum_set(inode, bs->bh);
866 unlock_buffer(bs->bh); 861 unlock_buffer(bs->bh);
867 if (error == -EFSCORRUPTED) 862 if (error == -EFSCORRUPTED)
868 goto bad_block; 863 goto bad_block;
869 if (!error) 864 if (!error)
870 error = ext4_handle_dirty_xattr_block(handle, 865 error = ext4_handle_dirty_metadata(handle,
871 inode, 866 inode,
872 bs->bh); 867 bs->bh);
873 if (error) 868 if (error)
874 goto cleanup; 869 goto cleanup;
875 goto inserted; 870 goto inserted;
@@ -967,10 +962,11 @@ inserted:
967 ce->e_reusable = 0; 962 ce->e_reusable = 0;
968 ea_bdebug(new_bh, "reusing; refcount now=%d", 963 ea_bdebug(new_bh, "reusing; refcount now=%d",
969 ref); 964 ref);
965 ext4_xattr_block_csum_set(inode, new_bh);
970 unlock_buffer(new_bh); 966 unlock_buffer(new_bh);
971 error = ext4_handle_dirty_xattr_block(handle, 967 error = ext4_handle_dirty_metadata(handle,
972 inode, 968 inode,
973 new_bh); 969 new_bh);
974 if (error) 970 if (error)
975 goto cleanup_dquot; 971 goto cleanup_dquot;
976 } 972 }
@@ -1020,11 +1016,12 @@ getblk_failed:
1020 goto getblk_failed; 1016 goto getblk_failed;
1021 } 1017 }
1022 memcpy(new_bh->b_data, s->base, new_bh->b_size); 1018 memcpy(new_bh->b_data, s->base, new_bh->b_size);
1019 ext4_xattr_block_csum_set(inode, new_bh);
1023 set_buffer_uptodate(new_bh); 1020 set_buffer_uptodate(new_bh);
1024 unlock_buffer(new_bh); 1021 unlock_buffer(new_bh);
1025 ext4_xattr_cache_insert(ext4_mb_cache, new_bh); 1022 ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
1026 error = ext4_handle_dirty_xattr_block(handle, 1023 error = ext4_handle_dirty_metadata(handle, inode,
1027 inode, new_bh); 1024 new_bh);
1028 if (error) 1025 if (error)
1029 goto cleanup; 1026 goto cleanup;
1030 } 1027 }
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a77df377e2e8..ee2d0a485fc3 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
196 si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); 196 si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
197 si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; 197 si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
198 si->base_mem += NM_I(sbi)->nat_blocks / 8; 198 si->base_mem += NM_I(sbi)->nat_blocks / 8;
199 si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
199 200
200get_cache: 201get_cache:
201 si->cache_mem = 0; 202 si->cache_mem = 0;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4650c9b85de7..8d5c62b07b28 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
750 dentry_blk = page_address(page); 750 dentry_blk = page_address(page);
751 bit_pos = dentry - dentry_blk->dentry; 751 bit_pos = dentry - dentry_blk->dentry;
752 for (i = 0; i < slots; i++) 752 for (i = 0; i < slots; i++)
753 clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); 753 __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
754 754
755 /* Let's check and deallocate this dentry page */ 755 /* Let's check and deallocate this dentry page */
756 bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, 756 bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e849f83d6114..0a6e115562f6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -561,6 +561,8 @@ struct f2fs_nm_info {
561 struct mutex build_lock; /* lock for build free nids */ 561 struct mutex build_lock; /* lock for build free nids */
562 unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; 562 unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
563 unsigned char *nat_block_bitmap; 563 unsigned char *nat_block_bitmap;
564 unsigned short *free_nid_count; /* free nid count of NAT block */
565 spinlock_t free_nid_lock; /* protect updating of nid count */
564 566
565 /* for checkpoint */ 567 /* for checkpoint */
566 char *nat_bitmap; /* NAT bitmap pointer */ 568 char *nat_bitmap; /* NAT bitmap pointer */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 94967171dee8..481aa8dc79f4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
338 set_nat_flag(e, IS_CHECKPOINTED, false); 338 set_nat_flag(e, IS_CHECKPOINTED, false);
339 __set_nat_cache_dirty(nm_i, e); 339 __set_nat_cache_dirty(nm_i, e);
340 340
341 if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
342 clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
343
344 /* update fsync_mark if its inode nat entry is still alive */ 341 /* update fsync_mark if its inode nat entry is still alive */
345 if (ni->nid != ni->ino) 342 if (ni->nid != ni->ino)
346 e = __lookup_nat_cache(nm_i, ni->ino); 343 e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
1823 kmem_cache_free(free_nid_slab, i); 1820 kmem_cache_free(free_nid_slab, i);
1824} 1821}
1825 1822
1826void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) 1823static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
1824 bool set, bool build, bool locked)
1827{ 1825{
1828 struct f2fs_nm_info *nm_i = NM_I(sbi); 1826 struct f2fs_nm_info *nm_i = NM_I(sbi);
1829 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); 1827 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
1833 return; 1831 return;
1834 1832
1835 if (set) 1833 if (set)
1836 set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1834 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1837 else 1835 else
1838 clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1836 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1837
1838 if (!locked)
1839 spin_lock(&nm_i->free_nid_lock);
1840 if (set)
1841 nm_i->free_nid_count[nat_ofs]++;
1842 else if (!build)
1843 nm_i->free_nid_count[nat_ofs]--;
1844 if (!locked)
1845 spin_unlock(&nm_i->free_nid_lock);
1839} 1846}
1840 1847
1841static void scan_nat_page(struct f2fs_sb_info *sbi, 1848static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
1847 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 1854 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
1848 int i; 1855 int i;
1849 1856
1850 set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1857 if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
1858 return;
1859
1860 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
1851 1861
1852 i = start_nid % NAT_ENTRY_PER_BLOCK; 1862 i = start_nid % NAT_ENTRY_PER_BLOCK;
1853 1863
@@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
1861 f2fs_bug_on(sbi, blk_addr == NEW_ADDR); 1871 f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
1862 if (blk_addr == NULL_ADDR) 1872 if (blk_addr == NULL_ADDR)
1863 freed = add_free_nid(sbi, start_nid, true); 1873 freed = add_free_nid(sbi, start_nid, true);
1864 update_free_nid_bitmap(sbi, start_nid, freed); 1874 update_free_nid_bitmap(sbi, start_nid, freed, true, false);
1865 } 1875 }
1866} 1876}
1867 1877
@@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
1877 for (i = 0; i < nm_i->nat_blocks; i++) { 1887 for (i = 0; i < nm_i->nat_blocks; i++) {
1878 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 1888 if (!test_bit_le(i, nm_i->nat_block_bitmap))
1879 continue; 1889 continue;
1890 if (!nm_i->free_nid_count[i])
1891 continue;
1880 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 1892 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
1881 nid_t nid; 1893 nid_t nid;
1882 1894
@@ -1907,58 +1919,6 @@ out:
1907 up_read(&nm_i->nat_tree_lock); 1919 up_read(&nm_i->nat_tree_lock);
1908} 1920}
1909 1921
1910static int scan_nat_bits(struct f2fs_sb_info *sbi)
1911{
1912 struct f2fs_nm_info *nm_i = NM_I(sbi);
1913 struct page *page;
1914 unsigned int i = 0;
1915 nid_t nid;
1916
1917 if (!enabled_nat_bits(sbi, NULL))
1918 return -EAGAIN;
1919
1920 down_read(&nm_i->nat_tree_lock);
1921check_empty:
1922 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
1923 if (i >= nm_i->nat_blocks) {
1924 i = 0;
1925 goto check_partial;
1926 }
1927
1928 for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
1929 nid++) {
1930 if (unlikely(nid >= nm_i->max_nid))
1931 break;
1932 add_free_nid(sbi, nid, true);
1933 }
1934
1935 if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
1936 goto out;
1937 i++;
1938 goto check_empty;
1939
1940check_partial:
1941 i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
1942 if (i >= nm_i->nat_blocks) {
1943 disable_nat_bits(sbi, true);
1944 up_read(&nm_i->nat_tree_lock);
1945 return -EINVAL;
1946 }
1947
1948 nid = i * NAT_ENTRY_PER_BLOCK;
1949 page = get_current_nat_page(sbi, nid);
1950 scan_nat_page(sbi, page, nid);
1951 f2fs_put_page(page, 1);
1952
1953 if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
1954 i++;
1955 goto check_partial;
1956 }
1957out:
1958 up_read(&nm_i->nat_tree_lock);
1959 return 0;
1960}
1961
1962static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 1922static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
1963{ 1923{
1964 struct f2fs_nm_info *nm_i = NM_I(sbi); 1924 struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
1980 1940
1981 if (nm_i->nid_cnt[FREE_NID_LIST]) 1941 if (nm_i->nid_cnt[FREE_NID_LIST])
1982 return; 1942 return;
1983
1984 /* try to find free nids with nat_bits */
1985 if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
1986 return;
1987 }
1988
1989 /* find next valid candidate */
1990 if (enabled_nat_bits(sbi, NULL)) {
1991 int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
1992 nm_i->nat_blocks, 0);
1993
1994 if (idx >= nm_i->nat_blocks)
1995 set_sbi_flag(sbi, SBI_NEED_FSCK);
1996 else
1997 nid = idx * NAT_ENTRY_PER_BLOCK;
1998 } 1943 }
1999 1944
2000 /* readahead nat pages to be scanned */ 1945 /* readahead nat pages to be scanned */
@@ -2081,7 +2026,7 @@ retry:
2081 __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); 2026 __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
2082 nm_i->available_nids--; 2027 nm_i->available_nids--;
2083 2028
2084 update_free_nid_bitmap(sbi, *nid, false); 2029 update_free_nid_bitmap(sbi, *nid, false, false, false);
2085 2030
2086 spin_unlock(&nm_i->nid_list_lock); 2031 spin_unlock(&nm_i->nid_list_lock);
2087 return true; 2032 return true;
@@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2137 2082
2138 nm_i->available_nids++; 2083 nm_i->available_nids++;
2139 2084
2140 update_free_nid_bitmap(sbi, nid, true); 2085 update_free_nid_bitmap(sbi, nid, true, false, false);
2141 2086
2142 spin_unlock(&nm_i->nid_list_lock); 2087 spin_unlock(&nm_i->nid_list_lock);
2143 2088
@@ -2383,7 +2328,7 @@ add_out:
2383 list_add_tail(&nes->set_list, head); 2328 list_add_tail(&nes->set_list, head);
2384} 2329}
2385 2330
2386void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2331static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2387 struct page *page) 2332 struct page *page)
2388{ 2333{
2389 struct f2fs_nm_info *nm_i = NM_I(sbi); 2334 struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2402 valid++; 2347 valid++;
2403 } 2348 }
2404 if (valid == 0) { 2349 if (valid == 0) {
2405 set_bit_le(nat_index, nm_i->empty_nat_bits); 2350 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2406 clear_bit_le(nat_index, nm_i->full_nat_bits); 2351 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2407 return; 2352 return;
2408 } 2353 }
2409 2354
2410 clear_bit_le(nat_index, nm_i->empty_nat_bits); 2355 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2411 if (valid == NAT_ENTRY_PER_BLOCK) 2356 if (valid == NAT_ENTRY_PER_BLOCK)
2412 set_bit_le(nat_index, nm_i->full_nat_bits); 2357 __set_bit_le(nat_index, nm_i->full_nat_bits);
2413 else 2358 else
2414 clear_bit_le(nat_index, nm_i->full_nat_bits); 2359 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2415} 2360}
2416 2361
2417static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, 2362static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2467 add_free_nid(sbi, nid, false); 2412 add_free_nid(sbi, nid, false);
2468 spin_lock(&NM_I(sbi)->nid_list_lock); 2413 spin_lock(&NM_I(sbi)->nid_list_lock);
2469 NM_I(sbi)->available_nids++; 2414 NM_I(sbi)->available_nids++;
2470 update_free_nid_bitmap(sbi, nid, true); 2415 update_free_nid_bitmap(sbi, nid, true, false, false);
2471 spin_unlock(&NM_I(sbi)->nid_list_lock); 2416 spin_unlock(&NM_I(sbi)->nid_list_lock);
2472 } else { 2417 } else {
2473 spin_lock(&NM_I(sbi)->nid_list_lock); 2418 spin_lock(&NM_I(sbi)->nid_list_lock);
2474 update_free_nid_bitmap(sbi, nid, false); 2419 update_free_nid_bitmap(sbi, nid, false, false, false);
2475 spin_unlock(&NM_I(sbi)->nid_list_lock); 2420 spin_unlock(&NM_I(sbi)->nid_list_lock);
2476 } 2421 }
2477 } 2422 }
@@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2577 return 0; 2522 return 0;
2578} 2523}
2579 2524
2525inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2526{
2527 struct f2fs_nm_info *nm_i = NM_I(sbi);
2528 unsigned int i = 0;
2529 nid_t nid, last_nid;
2530
2531 if (!enabled_nat_bits(sbi, NULL))
2532 return;
2533
2534 for (i = 0; i < nm_i->nat_blocks; i++) {
2535 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2536 if (i >= nm_i->nat_blocks)
2537 break;
2538
2539 __set_bit_le(i, nm_i->nat_block_bitmap);
2540
2541 nid = i * NAT_ENTRY_PER_BLOCK;
2542 last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
2543
2544 spin_lock(&nm_i->free_nid_lock);
2545 for (; nid < last_nid; nid++)
2546 update_free_nid_bitmap(sbi, nid, true, true, true);
2547 spin_unlock(&nm_i->free_nid_lock);
2548 }
2549
2550 for (i = 0; i < nm_i->nat_blocks; i++) {
2551 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2552 if (i >= nm_i->nat_blocks)
2553 break;
2554
2555 __set_bit_le(i, nm_i->nat_block_bitmap);
2556 }
2557}
2558
2580static int init_node_manager(struct f2fs_sb_info *sbi) 2559static int init_node_manager(struct f2fs_sb_info *sbi)
2581{ 2560{
2582 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 2561 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
2638 return 0; 2617 return 0;
2639} 2618}
2640 2619
2641int init_free_nid_cache(struct f2fs_sb_info *sbi) 2620static int init_free_nid_cache(struct f2fs_sb_info *sbi)
2642{ 2621{
2643 struct f2fs_nm_info *nm_i = NM_I(sbi); 2622 struct f2fs_nm_info *nm_i = NM_I(sbi);
2644 2623
@@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
2651 GFP_KERNEL); 2630 GFP_KERNEL);
2652 if (!nm_i->nat_block_bitmap) 2631 if (!nm_i->nat_block_bitmap)
2653 return -ENOMEM; 2632 return -ENOMEM;
2633
2634 nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
2635 sizeof(unsigned short), GFP_KERNEL);
2636 if (!nm_i->free_nid_count)
2637 return -ENOMEM;
2638
2639 spin_lock_init(&nm_i->free_nid_lock);
2640
2654 return 0; 2641 return 0;
2655} 2642}
2656 2643
@@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi)
2670 if (err) 2657 if (err)
2671 return err; 2658 return err;
2672 2659
2660 /* load free nid status from nat_bits table */
2661 load_free_nid_bitmap(sbi);
2662
2673 build_free_nids(sbi, true, true); 2663 build_free_nids(sbi, true, true);
2674 return 0; 2664 return 0;
2675} 2665}
@@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
2730 2720
2731 kvfree(nm_i->nat_block_bitmap); 2721 kvfree(nm_i->nat_block_bitmap);
2732 kvfree(nm_i->free_nid_bitmap); 2722 kvfree(nm_i->free_nid_bitmap);
2723 kvfree(nm_i->free_nid_count);
2733 2724
2734 kfree(nm_i->nat_bitmap); 2725 kfree(nm_i->nat_bitmap);
2735 kfree(nm_i->nat_bits); 2726 kfree(nm_i->nat_bits);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4bd7a8b19332..29ef7088c558 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1163 if (f2fs_discard_en(sbi) && 1163 if (f2fs_discard_en(sbi) &&
1164 !f2fs_test_and_set_bit(offset, se->discard_map)) 1164 !f2fs_test_and_set_bit(offset, se->discard_map))
1165 sbi->discard_blks--; 1165 sbi->discard_blks--;
1166
1167 /* don't overwrite by SSR to keep node chain */
1168 if (se->type == CURSEG_WARM_NODE) {
1169 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1170 se->ckpt_valid_blocks++;
1171 }
1166 } else { 1172 } else {
1167 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { 1173 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
1168#ifdef CONFIG_F2FS_CHECK_FS 1174#ifdef CONFIG_F2FS_CHECK_FS
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8f96461236f6..dde861387a40 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
136 vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; 136 vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
137 vma->vm_ops = &hugetlb_vm_ops; 137 vma->vm_ops = &hugetlb_vm_ops;
138 138
139 /*
140 * Offset passed to mmap (before page shift) could have been
141 * negative when represented as a (l)off_t.
142 */
143 if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
144 return -EINVAL;
145
139 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) 146 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
140 return -EINVAL; 147 return -EINVAL;
141 148
142 vma_len = (loff_t)(vma->vm_end - vma->vm_start); 149 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
150 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
151 /* check for overflow */
152 if (len < vma_len)
153 return -EINVAL;
143 154
144 inode_lock(inode); 155 inode_lock(inode);
145 file_accessed(file); 156 file_accessed(file);
146 157
147 ret = -ENOMEM; 158 ret = -ENOMEM;
148 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
149
150 if (hugetlb_reserve_pages(inode, 159 if (hugetlb_reserve_pages(inode,
151 vma->vm_pgoff >> huge_page_order(h), 160 vma->vm_pgoff >> huge_page_order(h),
152 len >> huge_page_shift(h), vma, 161 len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
155 164
156 ret = 0; 165 ret = 0;
157 if (vma->vm_flags & VM_WRITE && inode->i_size < len) 166 if (vma->vm_flags & VM_WRITE && inode->i_size < len)
158 inode->i_size = len; 167 i_size_write(inode, len);
159out: 168out:
160 inode_unlock(inode); 169 inode_unlock(inode);
161 170
@@ -695,14 +704,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
695 704
696 inode = new_inode(sb); 705 inode = new_inode(sb);
697 if (inode) { 706 if (inode) {
698 struct hugetlbfs_inode_info *info;
699 inode->i_ino = get_next_ino(); 707 inode->i_ino = get_next_ino();
700 inode->i_mode = S_IFDIR | config->mode; 708 inode->i_mode = S_IFDIR | config->mode;
701 inode->i_uid = config->uid; 709 inode->i_uid = config->uid;
702 inode->i_gid = config->gid; 710 inode->i_gid = config->gid;
703 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 711 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
704 info = HUGETLBFS_I(inode);
705 mpol_shared_policy_init(&info->policy, NULL);
706 inode->i_op = &hugetlbfs_dir_inode_operations; 712 inode->i_op = &hugetlbfs_dir_inode_operations;
707 inode->i_fop = &simple_dir_operations; 713 inode->i_fop = &simple_dir_operations;
708 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 714 /* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +739,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
733 739
734 inode = new_inode(sb); 740 inode = new_inode(sb);
735 if (inode) { 741 if (inode) {
736 struct hugetlbfs_inode_info *info;
737 inode->i_ino = get_next_ino(); 742 inode->i_ino = get_next_ino();
738 inode_init_owner(inode, dir, mode); 743 inode_init_owner(inode, dir, mode);
739 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, 744 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +746,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
741 inode->i_mapping->a_ops = &hugetlbfs_aops; 746 inode->i_mapping->a_ops = &hugetlbfs_aops;
742 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 747 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
743 inode->i_mapping->private_data = resv_map; 748 inode->i_mapping->private_data = resv_map;
744 info = HUGETLBFS_I(inode);
745 /*
746 * The policy is initialized here even if we are creating a
747 * private inode because initialization simply creates an
748 * an empty rb tree and calls rwlock_init(), later when we
749 * call mpol_free_shared_policy() it will just return because
750 * the rb tree will still be empty.
751 */
752 mpol_shared_policy_init(&info->policy, NULL);
753 switch (mode & S_IFMT) { 749 switch (mode & S_IFMT) {
754 default: 750 default:
755 init_special_inode(inode, mode, dev); 751 init_special_inode(inode, mode, dev);
@@ -937,6 +933,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
937 hugetlbfs_inc_free_inodes(sbinfo); 933 hugetlbfs_inc_free_inodes(sbinfo);
938 return NULL; 934 return NULL;
939 } 935 }
936
937 /*
938 * Any time after allocation, hugetlbfs_destroy_inode can be called
939 * for the inode. mpol_free_shared_policy is unconditionally called
940 * as part of hugetlbfs_destroy_inode. So, initialize policy here
941 * in case of a quick call to destroy.
942 *
943 * Note that the policy is initialized even if we are creating a
944 * private inode. This simplifies hugetlbfs_destroy_inode.
945 */
946 mpol_shared_policy_init(&p->policy, NULL);
947
940 return &p->vfs_inode; 948 return &p->vfs_inode;
941} 949}
942 950
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a1a359bfcc9c..5adc2fb62b0f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
1125 1125
1126 /* Set up a default-sized revoke table for the new mount. */ 1126 /* Set up a default-sized revoke table for the new mount. */
1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); 1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
1128 if (err) { 1128 if (err)
1129 kfree(journal); 1129 goto err_cleanup;
1130 return NULL;
1131 }
1132 1130
1133 spin_lock_init(&journal->j_history_lock); 1131 spin_lock_init(&journal->j_history_lock);
1134 1132
@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev,
1145 journal->j_wbufsize = n; 1143 journal->j_wbufsize = n;
1146 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), 1144 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
1147 GFP_KERNEL); 1145 GFP_KERNEL);
1148 if (!journal->j_wbuf) { 1146 if (!journal->j_wbuf)
1149 kfree(journal); 1147 goto err_cleanup;
1150 return NULL;
1151 }
1152 1148
1153 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); 1149 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
1154 if (!bh) { 1150 if (!bh) {
1155 pr_err("%s: Cannot get buffer for journal superblock\n", 1151 pr_err("%s: Cannot get buffer for journal superblock\n",
1156 __func__); 1152 __func__);
1157 kfree(journal->j_wbuf); 1153 goto err_cleanup;
1158 kfree(journal);
1159 return NULL;
1160 } 1154 }
1161 journal->j_sb_buffer = bh; 1155 journal->j_sb_buffer = bh;
1162 journal->j_superblock = (journal_superblock_t *)bh->b_data; 1156 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1163 1157
1164 return journal; 1158 return journal;
1159
1160err_cleanup:
1161 kfree(journal->j_wbuf);
1162 jbd2_journal_destroy_revoke(journal);
1163 kfree(journal);
1164 return NULL;
1165} 1165}
1166 1166
1167/* jbd2_journal_init_dev and jbd2_journal_init_inode: 1167/* jbd2_journal_init_dev and jbd2_journal_init_inode:
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index cfc38b552118..f9aefcda5854 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
280 280
281fail1: 281fail1:
282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); 282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
283 journal->j_revoke_table[0] = NULL;
283fail0: 284fail0:
284 return -ENOMEM; 285 return -ENOMEM;
285} 286}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8e4dc7ab584c..ac2dfe0c5a9c 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
809 if (kn->flags & KERNFS_HAS_MMAP) 809 if (kn->flags & KERNFS_HAS_MMAP)
810 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 810 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
811 811
812 kernfs_release_file(kn, of); 812 if (kn->flags & KERNFS_HAS_RELEASE)
813 kernfs_release_file(kn, of);
813 } 814 }
814 815
815 mutex_unlock(&kernfs_open_file_mutex); 816 mutex_unlock(&kernfs_open_file_mutex);
diff --git a/fs/namei.c b/fs/namei.c
index d41fab78798b..19dcf62133cc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
2145 int retval = 0; 2145 int retval = 0;
2146 const char *s = nd->name->name; 2146 const char *s = nd->name->name;
2147 2147
2148 if (!*s)
2149 flags &= ~LOOKUP_RCU;
2150
2148 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 2151 nd->last_type = LAST_ROOT; /* if there are only slashes... */
2149 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; 2152 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
2150 nd->depth = 0; 2153 nd->depth = 0;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fb499a3f21b5..f92ba8d6c556 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2055{ 2055{
2056 struct inode *old_inode = d_inode(old_dentry); 2056 struct inode *old_inode = d_inode(old_dentry);
2057 struct inode *new_inode = d_inode(new_dentry); 2057 struct inode *new_inode = d_inode(new_dentry);
2058 struct dentry *dentry = NULL, *rehash = NULL; 2058 struct dentry *dentry = NULL;
2059 struct rpc_task *task; 2059 struct rpc_task *task;
2060 int error = -EBUSY; 2060 int error = -EBUSY;
2061 2061
@@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2078 * To prevent any new references to the target during the 2078 * To prevent any new references to the target during the
2079 * rename, we unhash the dentry in advance. 2079 * rename, we unhash the dentry in advance.
2080 */ 2080 */
2081 if (!d_unhashed(new_dentry)) { 2081 if (!d_unhashed(new_dentry))
2082 d_drop(new_dentry); 2082 d_drop(new_dentry);
2083 rehash = new_dentry;
2084 }
2085 2083
2086 if (d_count(new_dentry) > 2) { 2084 if (d_count(new_dentry) > 2) {
2087 int err; 2085 int err;
@@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2098 goto out; 2096 goto out;
2099 2097
2100 new_dentry = dentry; 2098 new_dentry = dentry;
2101 rehash = NULL;
2102 new_inode = NULL; 2099 new_inode = NULL;
2103 } 2100 }
2104 } 2101 }
@@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2119 error = task->tk_status; 2116 error = task->tk_status;
2120 rpc_put_task(task); 2117 rpc_put_task(task);
2121out: 2118out:
2122 if (rehash)
2123 d_rehash(rehash);
2124 trace_nfs_rename_exit(old_dir, old_dentry, 2119 trace_nfs_rename_exit(old_dir, old_dentry,
2125 new_dir, new_dentry, error); 2120 new_dir, new_dentry, error);
2126 /* new dentry created? */ 2121 /* new dentry created? */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44347f4bdc15..acd30baca461 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task,
202 task->tk_status); 202 task->tk_status);
203 nfs4_mark_deviceid_unavailable(devid); 203 nfs4_mark_deviceid_unavailable(devid);
204 pnfs_error_mark_layout_for_return(inode, lseg); 204 pnfs_error_mark_layout_for_return(inode, lseg);
205 pnfs_set_lo_fail(lseg);
206 rpc_wake_up(&tbl->slot_tbl_waitq); 205 rpc_wake_up(&tbl->slot_tbl_waitq);
207 /* fall through */ 206 /* fall through */
208 default: 207 default:
208 pnfs_set_lo_fail(lseg);
209reset: 209reset:
210 dprintk("%s Retry through MDS. Error %d\n", __func__, 210 dprintk("%s Retry through MDS. Error %d\n", __func__,
211 task->tk_status); 211 task->tk_status);
@@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
560 return PNFS_ATTEMPTED; 560 return PNFS_ATTEMPTED;
561} 561}
562 562
563static int
564filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
565 struct nfs4_filelayout_segment *fl,
566 gfp_t gfp_flags)
567{
568 struct nfs4_deviceid_node *d;
569 struct nfs4_file_layout_dsaddr *dsaddr;
570 int status = -EINVAL;
571
572 /* find and reference the deviceid */
573 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
574 lo->plh_lc_cred, gfp_flags);
575 if (d == NULL)
576 goto out;
577
578 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
579 /* Found deviceid is unavailable */
580 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
581 goto out_put;
582
583 fl->dsaddr = dsaddr;
584
585 if (fl->first_stripe_index >= dsaddr->stripe_count) {
586 dprintk("%s Bad first_stripe_index %u\n",
587 __func__, fl->first_stripe_index);
588 goto out_put;
589 }
590
591 if ((fl->stripe_type == STRIPE_SPARSE &&
592 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
593 (fl->stripe_type == STRIPE_DENSE &&
594 fl->num_fh != dsaddr->stripe_count)) {
595 dprintk("%s num_fh %u not valid for given packing\n",
596 __func__, fl->num_fh);
597 goto out_put;
598 }
599 status = 0;
600out:
601 return status;
602out_put:
603 nfs4_fl_put_deviceid(dsaddr);
604 goto out;
605}
606
563/* 607/*
564 * filelayout_check_layout() 608 * filelayout_check_layout()
565 * 609 *
@@ -572,11 +616,8 @@ static int
572filelayout_check_layout(struct pnfs_layout_hdr *lo, 616filelayout_check_layout(struct pnfs_layout_hdr *lo,
573 struct nfs4_filelayout_segment *fl, 617 struct nfs4_filelayout_segment *fl,
574 struct nfs4_layoutget_res *lgr, 618 struct nfs4_layoutget_res *lgr,
575 struct nfs4_deviceid *id,
576 gfp_t gfp_flags) 619 gfp_t gfp_flags)
577{ 620{
578 struct nfs4_deviceid_node *d;
579 struct nfs4_file_layout_dsaddr *dsaddr;
580 int status = -EINVAL; 621 int status = -EINVAL;
581 622
582 dprintk("--> %s\n", __func__); 623 dprintk("--> %s\n", __func__);
@@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
601 goto out; 642 goto out;
602 } 643 }
603 644
604 /* find and reference the deviceid */
605 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
606 lo->plh_lc_cred, gfp_flags);
607 if (d == NULL)
608 goto out;
609
610 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
611 /* Found deviceid is unavailable */
612 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
613 goto out_put;
614
615 fl->dsaddr = dsaddr;
616
617 if (fl->first_stripe_index >= dsaddr->stripe_count) {
618 dprintk("%s Bad first_stripe_index %u\n",
619 __func__, fl->first_stripe_index);
620 goto out_put;
621 }
622
623 if ((fl->stripe_type == STRIPE_SPARSE &&
624 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
625 (fl->stripe_type == STRIPE_DENSE &&
626 fl->num_fh != dsaddr->stripe_count)) {
627 dprintk("%s num_fh %u not valid for given packing\n",
628 __func__, fl->num_fh);
629 goto out_put;
630 }
631
632 status = 0; 645 status = 0;
633out: 646out:
634 dprintk("--> %s returns %d\n", __func__, status); 647 dprintk("--> %s returns %d\n", __func__, status);
635 return status; 648 return status;
636out_put:
637 nfs4_fl_put_deviceid(dsaddr);
638 goto out;
639} 649}
640 650
641static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) 651static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
@@ -657,7 +667,6 @@ static int
657filelayout_decode_layout(struct pnfs_layout_hdr *flo, 667filelayout_decode_layout(struct pnfs_layout_hdr *flo,
658 struct nfs4_filelayout_segment *fl, 668 struct nfs4_filelayout_segment *fl,
659 struct nfs4_layoutget_res *lgr, 669 struct nfs4_layoutget_res *lgr,
660 struct nfs4_deviceid *id,
661 gfp_t gfp_flags) 670 gfp_t gfp_flags)
662{ 671{
663 struct xdr_stream stream; 672 struct xdr_stream stream;
@@ -682,9 +691,9 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
682 if (unlikely(!p)) 691 if (unlikely(!p))
683 goto out_err; 692 goto out_err;
684 693
685 memcpy(id, p, sizeof(*id)); 694 memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
686 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 695 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
687 nfs4_print_deviceid(id); 696 nfs4_print_deviceid(&fl->deviceid);
688 697
689 nfl_util = be32_to_cpup(p++); 698 nfl_util = be32_to_cpup(p++);
690 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) 699 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
831{ 840{
832 struct nfs4_filelayout_segment *fl; 841 struct nfs4_filelayout_segment *fl;
833 int rc; 842 int rc;
834 struct nfs4_deviceid id;
835 843
836 dprintk("--> %s\n", __func__); 844 dprintk("--> %s\n", __func__);
837 fl = kzalloc(sizeof(*fl), gfp_flags); 845 fl = kzalloc(sizeof(*fl), gfp_flags);
838 if (!fl) 846 if (!fl)
839 return NULL; 847 return NULL;
840 848
841 rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); 849 rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
842 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { 850 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
843 _filelayout_free_lseg(fl); 851 _filelayout_free_lseg(fl);
844 return NULL; 852 return NULL;
845 } 853 }
@@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
888 return min(stripe_unit - (unsigned int)stripe_offset, size); 896 return min(stripe_unit - (unsigned int)stripe_offset, size);
889} 897}
890 898
899static struct pnfs_layout_segment *
900fl_pnfs_update_layout(struct inode *ino,
901 struct nfs_open_context *ctx,
902 loff_t pos,
903 u64 count,
904 enum pnfs_iomode iomode,
905 bool strict_iomode,
906 gfp_t gfp_flags)
907{
908 struct pnfs_layout_segment *lseg = NULL;
909 struct pnfs_layout_hdr *lo;
910 struct nfs4_filelayout_segment *fl;
911 int status;
912
913 lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
914 gfp_flags);
915 if (!lseg)
916 lseg = ERR_PTR(-ENOMEM);
917 if (IS_ERR(lseg))
918 goto out;
919
920 lo = NFS_I(ino)->layout;
921 fl = FILELAYOUT_LSEG(lseg);
922
923 status = filelayout_check_deviceid(lo, fl, gfp_flags);
924 if (status)
925 lseg = ERR_PTR(status);
926out:
927 if (IS_ERR(lseg))
928 pnfs_put_lseg(lseg);
929 return lseg;
930}
931
891static void 932static void
892filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, 933filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
893 struct nfs_page *req) 934 struct nfs_page *req)
894{ 935{
895 if (!pgio->pg_lseg) { 936 if (!pgio->pg_lseg) {
896 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 937 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
897 req->wb_context, 938 req->wb_context,
898 0, 939 0,
899 NFS4_MAX_UINT64, 940 NFS4_MAX_UINT64,
900 IOMODE_READ, 941 IOMODE_READ,
901 false, 942 false,
902 GFP_KERNEL); 943 GFP_KERNEL);
903 if (IS_ERR(pgio->pg_lseg)) { 944 if (IS_ERR(pgio->pg_lseg)) {
904 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 945 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
905 pgio->pg_lseg = NULL; 946 pgio->pg_lseg = NULL;
@@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
919 int status; 960 int status;
920 961
921 if (!pgio->pg_lseg) { 962 if (!pgio->pg_lseg) {
922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 963 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
923 req->wb_context, 964 req->wb_context,
924 0, 965 0,
925 NFS4_MAX_UINT64, 966 NFS4_MAX_UINT64,
926 IOMODE_RW, 967 IOMODE_RW,
927 false, 968 false,
928 GFP_NOFS); 969 GFP_NOFS);
929 if (IS_ERR(pgio->pg_lseg)) { 970 if (IS_ERR(pgio->pg_lseg)) {
930 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 971 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
931 pgio->pg_lseg = NULL; 972 pgio->pg_lseg = NULL;
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 2896cb833a11..79323b5dab0c 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr {
55}; 55};
56 56
57struct nfs4_filelayout_segment { 57struct nfs4_filelayout_segment {
58 struct pnfs_layout_segment generic_hdr; 58 struct pnfs_layout_segment generic_hdr;
59 u32 stripe_type; 59 u32 stripe_type;
60 u32 commit_through_mds; 60 u32 commit_through_mds;
61 u32 stripe_unit; 61 u32 stripe_unit;
62 u32 first_stripe_index; 62 u32 first_stripe_index;
63 u64 pattern_offset; 63 u64 pattern_offset;
64 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 64 struct nfs4_deviceid deviceid;
65 unsigned int num_fh; 65 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
66 struct nfs_fh **fh_array; 66 unsigned int num_fh;
67 struct nfs_fh **fh_array;
67}; 68};
68 69
69struct nfs4_filelayout { 70struct nfs4_filelayout {
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 85fde93dff77..457cfeb1d5c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
208 } else 208 } else
209 goto outerr; 209 goto outerr;
210 } 210 }
211
212 if (IS_ERR(mirror->mirror_ds))
213 goto outerr;
214
211 if (mirror->mirror_ds->ds == NULL) { 215 if (mirror->mirror_ds->ds == NULL) {
212 struct nfs4_deviceid_node *devid; 216 struct nfs4_deviceid_node *devid;
213 devid = &mirror->mirror_ds->id_node; 217 devid = &mirror->mirror_ds->id_node;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c780d98035cc..201ca3f2c4ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2442,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2442 } 2442 }
2443 2443
2444 nfs4_stateid_copy(&stateid, &delegation->stateid); 2444 nfs4_stateid_copy(&stateid, &delegation->stateid);
2445 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2445 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
2446 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2447 &delegation->flags)) {
2446 rcu_read_unlock(); 2448 rcu_read_unlock();
2447 nfs_finish_clear_delegation_stateid(state, &stateid); 2449 nfs_finish_clear_delegation_stateid(state, &stateid);
2448 return; 2450 return;
2449 } 2451 }
2450 2452
2451 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
2452 rcu_read_unlock();
2453 return;
2454 }
2455
2456 cred = get_rpccred(delegation->cred); 2453 cred = get_rpccred(delegation->cred);
2457 rcu_read_unlock(); 2454 rcu_read_unlock();
2458 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2455 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index cbeeda1e94a2..d86031b6ad79 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
2489 2489
2490int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op) 2490int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
2491{ 2491{
2492 if (op->opnum == OP_ILLEGAL) 2492 if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
2493 return op_encode_hdr_size * sizeof(__be32); 2493 return op_encode_hdr_size * sizeof(__be32);
2494 2494
2495 BUG_ON(OPDESC(op)->op_rsize_bop == NULL); 2495 BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 73e75ac90525..8bf8f667a8cf 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -538,13 +538,21 @@ out_free:
538 538
539static ssize_t 539static ssize_t
540nfsd_print_version_support(char *buf, int remaining, const char *sep, 540nfsd_print_version_support(char *buf, int remaining, const char *sep,
541 unsigned vers, unsigned minor) 541 unsigned vers, int minor)
542{ 542{
543 const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; 543 const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
544 bool supported = !!nfsd_vers(vers, NFSD_TEST); 544 bool supported = !!nfsd_vers(vers, NFSD_TEST);
545 545
546 if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) 546 if (vers == 4 && minor >= 0 &&
547 !nfsd_minorversion(minor, NFSD_TEST))
547 supported = false; 548 supported = false;
549 if (minor == 0 && supported)
550 /*
551 * special case for backward compatability.
552 * +4.0 is never reported, it is implied by
553 * +4, unless -4.0 is present.
554 */
555 return 0;
548 return snprintf(buf, remaining, format, sep, 556 return snprintf(buf, remaining, format, sep,
549 supported ? '+' : '-', vers, minor); 557 supported ? '+' : '-', vers, minor);
550} 558}
@@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
554 char *mesg = buf; 562 char *mesg = buf;
555 char *vers, *minorp, sign; 563 char *vers, *minorp, sign;
556 int len, num, remaining; 564 int len, num, remaining;
557 unsigned minor;
558 ssize_t tlen = 0; 565 ssize_t tlen = 0;
559 char *sep; 566 char *sep;
560 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); 567 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
@@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
575 if (len <= 0) return -EINVAL; 582 if (len <= 0) return -EINVAL;
576 do { 583 do {
577 enum vers_op cmd; 584 enum vers_op cmd;
585 unsigned minor;
578 sign = *vers; 586 sign = *vers;
579 if (sign == '+' || sign == '-') 587 if (sign == '+' || sign == '-')
580 num = simple_strtol((vers+1), &minorp, 0); 588 num = simple_strtol((vers+1), &minorp, 0);
@@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
585 return -EINVAL; 593 return -EINVAL;
586 if (kstrtouint(minorp+1, 0, &minor) < 0) 594 if (kstrtouint(minorp+1, 0, &minor) < 0)
587 return -EINVAL; 595 return -EINVAL;
588 } else 596 }
589 minor = 0; 597
590 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; 598 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
591 switch(num) { 599 switch(num) {
592 case 2: 600 case 2:
@@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
594 nfsd_vers(num, cmd); 602 nfsd_vers(num, cmd);
595 break; 603 break;
596 case 4: 604 case 4:
597 if (nfsd_minorversion(minor, cmd) >= 0) 605 if (*minorp == '.') {
598 break; 606 if (nfsd_minorversion(minor, cmd) < 0)
607 return -EINVAL;
608 } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
609 /*
610 * Either we have +4 and no minors are enabled,
611 * or we have -4 and at least one minor is enabled.
612 * In either case, propagate 'cmd' to all minors.
613 */
614 minor = 0;
615 while (nfsd_minorversion(minor, cmd) >= 0)
616 minor++;
617 }
618 break;
599 default: 619 default:
600 return -EINVAL; 620 return -EINVAL;
601 } 621 }
@@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
612 sep = ""; 632 sep = "";
613 remaining = SIMPLE_TRANSACTION_LIMIT; 633 remaining = SIMPLE_TRANSACTION_LIMIT;
614 for (num=2 ; num <= 4 ; num++) { 634 for (num=2 ; num <= 4 ; num++) {
635 int minor;
615 if (!nfsd_vers(num, NFSD_AVAIL)) 636 if (!nfsd_vers(num, NFSD_AVAIL))
616 continue; 637 continue;
617 minor = 0; 638
639 minor = -1;
618 do { 640 do {
619 len = nfsd_print_version_support(buf, remaining, 641 len = nfsd_print_version_support(buf, remaining,
620 sep, num, minor); 642 sep, num, minor);
@@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
624 buf += len; 646 buf += len;
625 tlen += len; 647 tlen += len;
626 minor++; 648 minor++;
627 sep = " "; 649 if (len)
650 sep = " ";
628 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); 651 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
629 } 652 }
630out: 653out:
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index fa82b7707e85..03a7e9da4da0 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -786,6 +786,7 @@ nfserrno (int errno)
786 { nfserr_serverfault, -ESERVERFAULT }, 786 { nfserr_serverfault, -ESERVERFAULT },
787 { nfserr_serverfault, -ENFILE }, 787 { nfserr_serverfault, -ENFILE },
788 { nfserr_io, -EUCLEAN }, 788 { nfserr_io, -EUCLEAN },
789 { nfserr_perm, -ENOKEY },
789 }; 790 };
790 int i; 791 int i;
791 792
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 786a4a2cb2d7..31e1f9593457 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void)
167 167
168int nfsd_minorversion(u32 minorversion, enum vers_op change) 168int nfsd_minorversion(u32 minorversion, enum vers_op change)
169{ 169{
170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) 170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
171 change != NFSD_AVAIL)
171 return -1; 172 return -1;
172 switch(change) { 173 switch(change) {
173 case NFSD_SET: 174 case NFSD_SET:
@@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
415 416
416void nfsd_reset_versions(void) 417void nfsd_reset_versions(void)
417{ 418{
418 int found_one = 0;
419 int i; 419 int i;
420 420
421 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { 421 for (i = 0; i < NFSD_NRVERS; i++)
422 if (nfsd_program.pg_vers[i]) 422 if (nfsd_vers(i, NFSD_TEST))
423 found_one = 1; 423 return;
424 }
425 424
426 if (!found_one) { 425 for (i = 0; i < NFSD_NRVERS; i++)
427 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) 426 if (i != 4)
428 nfsd_program.pg_vers[i] = nfsd_version[i]; 427 nfsd_vers(i, NFSD_SET);
429#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 428 else {
430 for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) 429 int minor = 0;
431 nfsd_acl_program.pg_vers[i] = 430 while (nfsd_minorversion(minor, NFSD_SET) >= 0)
432 nfsd_acl_version[i]; 431 minor++;
433#endif 432 }
434 }
435} 433}
436 434
437/* 435/*
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 1656843e87d2..323f492e0822 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -91,6 +91,7 @@ slow:
91 return ERR_PTR(-ENOMEM); 91 return ERR_PTR(-ENOMEM);
92 } 92 }
93 d_instantiate(dentry, inode); 93 d_instantiate(dentry, inode);
94 dentry->d_flags |= DCACHE_RCUACCESS;
94 dentry->d_fsdata = (void *)ns->ops; 95 dentry->d_fsdata = (void *)ns->ops;
95 d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); 96 d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
96 if (d) { 97 if (d) {
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index c4ab6fdf17a0..e1534c9bab16 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -208,14 +208,19 @@ restart:
208 continue; 208 continue;
209 /* 209 /*
210 * Skip ops whose filesystem we don't know about unless 210 * Skip ops whose filesystem we don't know about unless
211 * it is being mounted. 211 * it is being mounted or unmounted. It is possible for
212 * a filesystem we don't know about to be unmounted if
213 * it fails to mount in the kernel after userspace has
214 * been sent the mount request.
212 */ 215 */
213 /* XXX: is there a better way to detect this? */ 216 /* XXX: is there a better way to detect this? */
214 } else if (ret == -1 && 217 } else if (ret == -1 &&
215 !(op->upcall.type == 218 !(op->upcall.type ==
216 ORANGEFS_VFS_OP_FS_MOUNT || 219 ORANGEFS_VFS_OP_FS_MOUNT ||
217 op->upcall.type == 220 op->upcall.type ==
218 ORANGEFS_VFS_OP_GETATTR)) { 221 ORANGEFS_VFS_OP_GETATTR ||
222 op->upcall.type ==
223 ORANGEFS_VFS_OP_FS_UMOUNT)) {
219 gossip_debug(GOSSIP_DEV_DEBUG, 224 gossip_debug(GOSSIP_DEV_DEBUG,
220 "orangefs: skipping op tag %llu %s\n", 225 "orangefs: skipping op tag %llu %s\n",
221 llu(op->tag), get_opname_string(op)); 226 llu(op->tag), get_opname_string(op));
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 5e48a0be9761..8afac46fcc87 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
249 char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; 249 char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
250 struct super_block *sb; 250 struct super_block *sb;
251 int mount_pending; 251 int mount_pending;
252 int no_list;
252 struct list_head list; 253 struct list_head list;
253}; 254};
254 255
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c24351a67f..629d8c917fa6 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
263 if (!new_op) 263 if (!new_op)
264 return -ENOMEM; 264 return -ENOMEM;
265 new_op->upcall.req.features.features = 0; 265 new_op->upcall.req.features.features = 0;
266 ret = service_operation(new_op, "orangefs_features", 0); 266 ret = service_operation(new_op, "orangefs_features",
267 orangefs_features = new_op->downcall.resp.features.features; 267 ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
268 if (!ret)
269 orangefs_features =
270 new_op->downcall.resp.features.features;
271 else
272 orangefs_features = 0;
268 op_release(new_op); 273 op_release(new_op);
269 } else { 274 } else {
270 orangefs_features = 0; 275 orangefs_features = 0;
@@ -488,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
488 493
489 if (ret) { 494 if (ret) {
490 d = ERR_PTR(ret); 495 d = ERR_PTR(ret);
491 goto free_op; 496 goto free_sb_and_op;
492 } 497 }
493 498
494 /* 499 /*
@@ -514,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
514 spin_unlock(&orangefs_superblocks_lock); 519 spin_unlock(&orangefs_superblocks_lock);
515 op_release(new_op); 520 op_release(new_op);
516 521
522 /* Must be removed from the list now. */
523 ORANGEFS_SB(sb)->no_list = 0;
524
517 if (orangefs_userspace_version >= 20906) { 525 if (orangefs_userspace_version >= 20906) {
518 new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); 526 new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
519 if (!new_op) 527 if (!new_op)
@@ -528,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
528 536
529 return dget(sb->s_root); 537 return dget(sb->s_root);
530 538
539free_sb_and_op:
540 /* Will call orangefs_kill_sb with sb not in list. */
541 ORANGEFS_SB(sb)->no_list = 1;
542 deactivate_locked_super(sb);
531free_op: 543free_op:
532 gossip_err("orangefs_mount: mount request failed with %d\n", ret); 544 gossip_err("orangefs_mount: mount request failed with %d\n", ret);
533 if (ret == -EINVAL) { 545 if (ret == -EINVAL) {
@@ -553,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
553 */ 565 */
554 orangefs_unmount_sb(sb); 566 orangefs_unmount_sb(sb);
555 567
556 /* remove the sb from our list of orangefs specific sb's */ 568 if (!ORANGEFS_SB(sb)->no_list) {
557 569 /* remove the sb from our list of orangefs specific sb's */
558 spin_lock(&orangefs_superblocks_lock); 570 spin_lock(&orangefs_superblocks_lock);
559 __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ 571 /* not list_del_init */
560 ORANGEFS_SB(sb)->list.prev = NULL; 572 __list_del_entry(&ORANGEFS_SB(sb)->list);
561 spin_unlock(&orangefs_superblocks_lock); 573 ORANGEFS_SB(sb)->list.prev = NULL;
574 spin_unlock(&orangefs_superblocks_lock);
575 }
562 576
563 /* 577 /*
564 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us 578 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 8f91ec66baa3..d04ea4349909 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
1074 1074
1075 if ((table->proc_handler == proc_dostring) || 1075 if ((table->proc_handler == proc_dostring) ||
1076 (table->proc_handler == proc_dointvec) || 1076 (table->proc_handler == proc_dointvec) ||
1077 (table->proc_handler == proc_douintvec) ||
1077 (table->proc_handler == proc_dointvec_minmax) || 1078 (table->proc_handler == proc_dointvec_minmax) ||
1078 (table->proc_handler == proc_dointvec_jiffies) || 1079 (table->proc_handler == proc_dointvec_jiffies) ||
1079 (table->proc_handler == proc_dointvec_userhz_jiffies) || 1080 (table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f08bd31c1081..312578089544 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
900static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, 900static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
901 unsigned long addr, pmd_t *pmdp) 901 unsigned long addr, pmd_t *pmdp)
902{ 902{
903 pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); 903 pmd_t pmd = *pmdp;
904
905 /* See comment in change_huge_pmd() */
906 pmdp_invalidate(vma, addr, pmdp);
907 if (pmd_dirty(*pmdp))
908 pmd = pmd_mkdirty(pmd);
909 if (pmd_young(*pmdp))
910 pmd = pmd_mkyoung(pmd);
904 911
905 pmd = pmd_wrprotect(pmd); 912 pmd = pmd_wrprotect(pmd);
906 pmd = pmd_clear_soft_dirty(pmd); 913 pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/stat.c b/fs/stat.c
index fa0be59340cc..c6c963b2546b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
130int vfs_statx_fd(unsigned int fd, struct kstat *stat, 130int vfs_statx_fd(unsigned int fd, struct kstat *stat,
131 u32 request_mask, unsigned int query_flags) 131 u32 request_mask, unsigned int query_flags)
132{ 132{
133 struct fd f = fdget_raw(fd); 133 struct fd f;
134 int error = -EBADF; 134 int error = -EBADF;
135 135
136 if (query_flags & ~KSTAT_QUERY_FLAGS)
137 return -EINVAL;
138
139 f = fdget_raw(fd);
136 if (f.file) { 140 if (f.file) {
137 error = vfs_getattr(&f.file->f_path, stat, 141 error = vfs_getattr(&f.file->f_path, stat,
138 request_mask, query_flags); 142 request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
155 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink 159 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
156 * at the given name from being referenced. 160 * at the given name from being referenced.
157 * 161 *
158 * The caller must have preset stat->request_mask as for vfs_getattr(). The
159 * flags are also used to load up stat->query_flags.
160 *
161 * 0 will be returned on success, and a -ve error code if unsuccessful. 162 * 0 will be returned on success, and a -ve error code if unsuccessful.
162 */ 163 */
163int vfs_statx(int dfd, const char __user *filename, int flags, 164int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
509} 510}
510#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ 511#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
511 512
512static inline int __put_timestamp(struct timespec *kts, 513static noinline_for_stack int
513 struct statx_timestamp __user *uts) 514cp_statx(const struct kstat *stat, struct statx __user *buffer)
514{
515 return (__put_user(kts->tv_sec, &uts->tv_sec ) ||
516 __put_user(kts->tv_nsec, &uts->tv_nsec ) ||
517 __put_user(0, &uts->__reserved ));
518}
519
520/*
521 * Set the statx results.
522 */
523static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
524{ 515{
525 uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); 516 struct statx tmp;
526 gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); 517
527 518 memset(&tmp, 0, sizeof(tmp));
528 if (__put_user(stat->result_mask, &buffer->stx_mask ) || 519
529 __put_user(stat->mode, &buffer->stx_mode ) || 520 tmp.stx_mask = stat->result_mask;
530 __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || 521 tmp.stx_blksize = stat->blksize;
531 __put_user(stat->nlink, &buffer->stx_nlink ) || 522 tmp.stx_attributes = stat->attributes;
532 __put_user(uid, &buffer->stx_uid ) || 523 tmp.stx_nlink = stat->nlink;
533 __put_user(gid, &buffer->stx_gid ) || 524 tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
534 __put_user(stat->attributes, &buffer->stx_attributes ) || 525 tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
535 __put_user(stat->blksize, &buffer->stx_blksize ) || 526 tmp.stx_mode = stat->mode;
536 __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || 527 tmp.stx_ino = stat->ino;
537 __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || 528 tmp.stx_size = stat->size;
538 __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || 529 tmp.stx_blocks = stat->blocks;
539 __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || 530 tmp.stx_attributes_mask = stat->attributes_mask;
540 __put_timestamp(&stat->atime, &buffer->stx_atime ) || 531 tmp.stx_atime.tv_sec = stat->atime.tv_sec;
541 __put_timestamp(&stat->btime, &buffer->stx_btime ) || 532 tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
542 __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || 533 tmp.stx_btime.tv_sec = stat->btime.tv_sec;
543 __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || 534 tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
544 __put_user(stat->ino, &buffer->stx_ino ) || 535 tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
545 __put_user(stat->size, &buffer->stx_size ) || 536 tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
546 __put_user(stat->blocks, &buffer->stx_blocks ) || 537 tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
547 __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || 538 tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
548 __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) 539 tmp.stx_rdev_major = MAJOR(stat->rdev);
549 return -EFAULT; 540 tmp.stx_rdev_minor = MINOR(stat->rdev);
550 541 tmp.stx_dev_major = MAJOR(stat->dev);
551 return 0; 542 tmp.stx_dev_minor = MINOR(stat->dev);
543
544 return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
552} 545}
553 546
554/** 547/**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
570 struct kstat stat; 563 struct kstat stat;
571 int error; 564 int error;
572 565
566 if (mask & STATX__RESERVED)
567 return -EINVAL;
573 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) 568 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
574 return -EINVAL; 569 return -EINVAL;
575 if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
576 return -EFAULT;
577 570
578 if (filename) 571 if (filename)
579 error = vfs_statx(dfd, filename, flags, &stat, mask); 572 error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
581 error = vfs_statx_fd(dfd, &stat, mask, flags); 574 error = vfs_statx_fd(dfd, &stat, mask, flags);
582 if (error) 575 if (error)
583 return error; 576 return error;
584 return statx_set_result(&stat, buffer); 577
578 return cp_statx(&stat, buffer);
585} 579}
586 580
587/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ 581/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213d1307..39c75a86c67f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
108{ 108{
109 const struct sysfs_ops *ops = sysfs_file_ops(of->kn); 109 const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
110 struct kobject *kobj = of->kn->parent->priv; 110 struct kobject *kobj = of->kn->parent->priv;
111 size_t len; 111 ssize_t len;
112 112
113 /* 113 /*
114 * If buf != of->prealloc_buf, we don't know how 114 * If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
117 if (WARN_ON_ONCE(buf != of->prealloc_buf)) 117 if (WARN_ON_ONCE(buf != of->prealloc_buf))
118 return 0; 118 return 0;
119 len = ops->show(kobj, of->kn->priv, buf); 119 len = ops->show(kobj, of->kn->priv, buf);
120 if (len < 0)
121 return len;
120 if (pos) { 122 if (pos) {
121 if (len <= pos) 123 if (len <= pos)
122 return 0; 124 return 0;
123 len -= pos; 125 len -= pos;
124 memmove(buf, buf + pos, len); 126 memmove(buf, buf + pos, len);
125 } 127 }
126 return min(count, len); 128 return min_t(ssize_t, count, len);
127} 129}
128 130
129/* kernfs write callback for regular sysfs files */ 131/* kernfs write callback for regular sysfs files */
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 1e712a364680..718b749fa11a 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -32,6 +32,7 @@
32#include <linux/math64.h> 32#include <linux/math64.h>
33#include <linux/uaccess.h> 33#include <linux/uaccess.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <linux/ctype.h>
35#include "ubifs.h" 36#include "ubifs.h"
36 37
37static DEFINE_SPINLOCK(dbg_lock); 38static DEFINE_SPINLOCK(dbg_lock);
@@ -286,8 +287,10 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
286 break; 287 break;
287 } 288 }
288 289
289 pr_err("\t%d: %s (%s)\n", 290 pr_err("\t%d: inode %llu, type %s, len %d\n",
290 count++, dent->name, get_dent_type(dent->type)); 291 count++, (unsigned long long) le64_to_cpu(dent->inum),
292 get_dent_type(dent->type),
293 le16_to_cpu(dent->nlen));
291 294
292 fname_name(&nm) = dent->name; 295 fname_name(&nm) = dent->name;
293 fname_len(&nm) = le16_to_cpu(dent->nlen); 296 fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -464,7 +467,8 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
464 pr_err("(bad name length, not printing, bad or corrupted node)"); 467 pr_err("(bad name length, not printing, bad or corrupted node)");
465 else { 468 else {
466 for (i = 0; i < nlen && dent->name[i]; i++) 469 for (i = 0; i < nlen && dent->name[i]; i++)
467 pr_cont("%c", dent->name[i]); 470 pr_cont("%c", isprint(dent->name[i]) ?
471 dent->name[i] : '?');
468 } 472 }
469 pr_cont("\n"); 473 pr_cont("\n");
470 474
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 30825d882aa9..b777bddaa1dd 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -606,8 +606,8 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
606 } 606 }
607 607
608 while (1) { 608 while (1) {
609 dbg_gen("feed '%s', ino %llu, new f_pos %#x", 609 dbg_gen("ino %llu, new f_pos %#x",
610 dent->name, (unsigned long long)le64_to_cpu(dent->inum), 610 (unsigned long long)le64_to_cpu(dent->inum),
611 key_hash_flash(c, &dent->key)); 611 key_hash_flash(c, &dent->key));
612 ubifs_assert(le64_to_cpu(dent->ch.sqnum) > 612 ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
613 ubifs_inode(dir)->creat_sqnum); 613 ubifs_inode(dir)->creat_sqnum);
@@ -748,6 +748,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
748 goto out_fname; 748 goto out_fname;
749 749
750 lock_2_inodes(dir, inode); 750 lock_2_inodes(dir, inode);
751
752 /* Handle O_TMPFILE corner case, it is allowed to link a O_TMPFILE. */
753 if (inode->i_nlink == 0)
754 ubifs_delete_orphan(c, inode->i_ino);
755
751 inc_nlink(inode); 756 inc_nlink(inode);
752 ihold(inode); 757 ihold(inode);
753 inode->i_ctime = ubifs_current_time(inode); 758 inode->i_ctime = ubifs_current_time(inode);
@@ -768,6 +773,8 @@ out_cancel:
768 dir->i_size -= sz_change; 773 dir->i_size -= sz_change;
769 dir_ui->ui_size = dir->i_size; 774 dir_ui->ui_size = dir->i_size;
770 drop_nlink(inode); 775 drop_nlink(inode);
776 if (inode->i_nlink == 0)
777 ubifs_add_orphan(c, inode->i_ino);
771 unlock_2_inodes(dir, inode); 778 unlock_2_inodes(dir, inode);
772 ubifs_release_budget(c, &req); 779 ubifs_release_budget(c, &req);
773 iput(inode); 780 iput(inode);
@@ -1068,8 +1075,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
1068 } 1075 }
1069 1076
1070 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 1077 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1071 if (err) 1078 if (err) {
1079 kfree(dev);
1072 goto out_budg; 1080 goto out_budg;
1081 }
1073 1082
1074 sz_change = CALC_DENT_SIZE(fname_len(&nm)); 1083 sz_change = CALC_DENT_SIZE(fname_len(&nm));
1075 1084
@@ -1316,9 +1325,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
1316 unsigned int uninitialized_var(saved_nlink); 1325 unsigned int uninitialized_var(saved_nlink);
1317 struct fscrypt_name old_nm, new_nm; 1326 struct fscrypt_name old_nm, new_nm;
1318 1327
1319 if (flags & ~RENAME_NOREPLACE)
1320 return -EINVAL;
1321
1322 /* 1328 /*
1323 * Budget request settings: deletion direntry, new direntry, removing 1329 * Budget request settings: deletion direntry, new direntry, removing
1324 * the old inode, and changing old and new parent directory inodes. 1330 * the old inode, and changing old and new parent directory inodes.
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
1756 * protocols: aa:... bb:... 1756 * protocols: aa:... bb:...
1757 */ 1757 */
1758 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", 1758 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
1759 pending, total, UFFD_API, UFFD_API_FEATURES, 1759 pending, total, UFFD_API, ctx->features,
1760 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); 1760 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
1761} 1761}
1762#endif 1762#endif
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index eb00bc133bca..39f8604f764e 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); 125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
126extern int xfs_dir2_sf_removename(struct xfs_da_args *args); 126extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
127extern int xfs_dir2_sf_replace(struct xfs_da_args *args); 127extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
128extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp, 128extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
129 int size);
130 129
131/* xfs_dir2_readdir.c */ 130/* xfs_dir2_readdir.c */
132extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, 131extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 96b45cd6c63f..e84af093b2ab 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -632,36 +632,49 @@ xfs_dir2_sf_check(
632/* Verify the consistency of an inline directory. */ 632/* Verify the consistency of an inline directory. */
633int 633int
634xfs_dir2_sf_verify( 634xfs_dir2_sf_verify(
635 struct xfs_mount *mp, 635 struct xfs_inode *ip)
636 struct xfs_dir2_sf_hdr *sfp,
637 int size)
638{ 636{
637 struct xfs_mount *mp = ip->i_mount;
638 struct xfs_dir2_sf_hdr *sfp;
639 struct xfs_dir2_sf_entry *sfep; 639 struct xfs_dir2_sf_entry *sfep;
640 struct xfs_dir2_sf_entry *next_sfep; 640 struct xfs_dir2_sf_entry *next_sfep;
641 char *endp; 641 char *endp;
642 const struct xfs_dir_ops *dops; 642 const struct xfs_dir_ops *dops;
643 struct xfs_ifork *ifp;
643 xfs_ino_t ino; 644 xfs_ino_t ino;
644 int i; 645 int i;
645 int i8count; 646 int i8count;
646 int offset; 647 int offset;
648 int size;
649 int error;
647 __uint8_t filetype; 650 __uint8_t filetype;
648 651
652 ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
653 /*
654 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
655 * so we can only trust the mountpoint to have the right pointer.
656 */
649 dops = xfs_dir_get_ops(mp, NULL); 657 dops = xfs_dir_get_ops(mp, NULL);
650 658
659 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
660 sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
661 size = ifp->if_bytes;
662
651 /* 663 /*
652 * Give up if the directory is way too short. 664 * Give up if the directory is way too short.
653 */ 665 */
654 XFS_WANT_CORRUPTED_RETURN(mp, size > 666 if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
655 offsetof(struct xfs_dir2_sf_hdr, parent)); 667 size < xfs_dir2_sf_hdr_size(sfp->i8count))
656 XFS_WANT_CORRUPTED_RETURN(mp, size >= 668 return -EFSCORRUPTED;
657 xfs_dir2_sf_hdr_size(sfp->i8count));
658 669
659 endp = (char *)sfp + size; 670 endp = (char *)sfp + size;
660 671
661 /* Check .. entry */ 672 /* Check .. entry */
662 ino = dops->sf_get_parent_ino(sfp); 673 ino = dops->sf_get_parent_ino(sfp);
663 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 674 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
664 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 675 error = xfs_dir_ino_validate(mp, ino);
676 if (error)
677 return error;
665 offset = dops->data_first_offset; 678 offset = dops->data_first_offset;
666 679
667 /* Check all reported entries */ 680 /* Check all reported entries */
@@ -672,12 +685,12 @@ xfs_dir2_sf_verify(
672 * Check the fixed-offset parts of the structure are 685 * Check the fixed-offset parts of the structure are
673 * within the data buffer. 686 * within the data buffer.
674 */ 687 */
675 XFS_WANT_CORRUPTED_RETURN(mp, 688 if (((char *)sfep + sizeof(*sfep)) >= endp)
676 ((char *)sfep + sizeof(*sfep)) < endp); 689 return -EFSCORRUPTED;
677 690
678 /* Don't allow names with known bad length. */ 691 /* Don't allow names with known bad length. */
679 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); 692 if (sfep->namelen == 0)
680 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); 693 return -EFSCORRUPTED;
681 694
682 /* 695 /*
683 * Check that the variable-length part of the structure is 696 * Check that the variable-length part of the structure is
@@ -685,33 +698,39 @@ xfs_dir2_sf_verify(
685 * name component, so nextentry is an acceptable test. 698 * name component, so nextentry is an acceptable test.
686 */ 699 */
687 next_sfep = dops->sf_nextentry(sfp, sfep); 700 next_sfep = dops->sf_nextentry(sfp, sfep);
688 XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); 701 if (endp < (char *)next_sfep)
702 return -EFSCORRUPTED;
689 703
690 /* Check that the offsets always increase. */ 704 /* Check that the offsets always increase. */
691 XFS_WANT_CORRUPTED_RETURN(mp, 705 if (xfs_dir2_sf_get_offset(sfep) < offset)
692 xfs_dir2_sf_get_offset(sfep) >= offset); 706 return -EFSCORRUPTED;
693 707
694 /* Check the inode number. */ 708 /* Check the inode number. */
695 ino = dops->sf_get_ino(sfp, sfep); 709 ino = dops->sf_get_ino(sfp, sfep);
696 i8count += ino > XFS_DIR2_MAX_SHORT_INUM; 710 i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
697 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 711 error = xfs_dir_ino_validate(mp, ino);
712 if (error)
713 return error;
698 714
699 /* Check the file type. */ 715 /* Check the file type. */
700 filetype = dops->sf_get_ftype(sfep); 716 filetype = dops->sf_get_ftype(sfep);
701 XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); 717 if (filetype >= XFS_DIR3_FT_MAX)
718 return -EFSCORRUPTED;
702 719
703 offset = xfs_dir2_sf_get_offset(sfep) + 720 offset = xfs_dir2_sf_get_offset(sfep) +
704 dops->data_entsize(sfep->namelen); 721 dops->data_entsize(sfep->namelen);
705 722
706 sfep = next_sfep; 723 sfep = next_sfep;
707 } 724 }
708 XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); 725 if (i8count != sfp->i8count)
709 XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); 726 return -EFSCORRUPTED;
727 if ((void *)sfep != (void *)endp)
728 return -EFSCORRUPTED;
710 729
711 /* Make sure this whole thing ought to be in local format. */ 730 /* Make sure this whole thing ought to be in local format. */
712 XFS_WANT_CORRUPTED_RETURN(mp, offset + 731 if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
713 (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + 732 (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
714 (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); 733 return -EFSCORRUPTED;
715 734
716 return 0; 735 return 0;
717} 736}
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 9653e964eda4..8a37efe04de3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -212,6 +212,16 @@ xfs_iformat_fork(
212 if (error) 212 if (error)
213 return error; 213 return error;
214 214
215 /* Check inline dir contents. */
216 if (S_ISDIR(VFS_I(ip)->i_mode) &&
217 dip->di_format == XFS_DINODE_FMT_LOCAL) {
218 error = xfs_dir2_sf_verify(ip);
219 if (error) {
220 xfs_idestroy_fork(ip, XFS_DATA_FORK);
221 return error;
222 }
223 }
224
215 if (xfs_is_reflink_inode(ip)) { 225 if (xfs_is_reflink_inode(ip)) {
216 ASSERT(ip->i_cowfp == NULL); 226 ASSERT(ip->i_cowfp == NULL);
217 xfs_ifork_init_cow(ip); 227 xfs_ifork_init_cow(ip);
@@ -322,8 +332,6 @@ xfs_iformat_local(
322 int whichfork, 332 int whichfork,
323 int size) 333 int size)
324{ 334{
325 int error;
326
327 /* 335 /*
328 * If the size is unreasonable, then something 336 * If the size is unreasonable, then something
329 * is wrong and we just bail out rather than crash in 337 * is wrong and we just bail out rather than crash in
@@ -339,14 +347,6 @@ xfs_iformat_local(
339 return -EFSCORRUPTED; 347 return -EFSCORRUPTED;
340 } 348 }
341 349
342 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
343 error = xfs_dir2_sf_verify(ip->i_mount,
344 (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
345 size);
346 if (error)
347 return error;
348 }
349
350 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); 350 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
351 return 0; 351 return 0;
352} 352}
@@ -867,7 +867,7 @@ xfs_iextents_copy(
867 * In these cases, the format always takes precedence, because the 867 * In these cases, the format always takes precedence, because the
868 * format indicates the current state of the fork. 868 * format indicates the current state of the fork.
869 */ 869 */
870int 870void
871xfs_iflush_fork( 871xfs_iflush_fork(
872 xfs_inode_t *ip, 872 xfs_inode_t *ip,
873 xfs_dinode_t *dip, 873 xfs_dinode_t *dip,
@@ -877,7 +877,6 @@ xfs_iflush_fork(
877 char *cp; 877 char *cp;
878 xfs_ifork_t *ifp; 878 xfs_ifork_t *ifp;
879 xfs_mount_t *mp; 879 xfs_mount_t *mp;
880 int error;
881 static const short brootflag[2] = 880 static const short brootflag[2] =
882 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 881 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
883 static const short dataflag[2] = 882 static const short dataflag[2] =
@@ -886,7 +885,7 @@ xfs_iflush_fork(
886 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 885 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
887 886
888 if (!iip) 887 if (!iip)
889 return 0; 888 return;
890 ifp = XFS_IFORK_PTR(ip, whichfork); 889 ifp = XFS_IFORK_PTR(ip, whichfork);
891 /* 890 /*
892 * This can happen if we gave up in iformat in an error path, 891 * This can happen if we gave up in iformat in an error path,
@@ -894,19 +893,12 @@ xfs_iflush_fork(
894 */ 893 */
895 if (!ifp) { 894 if (!ifp) {
896 ASSERT(whichfork == XFS_ATTR_FORK); 895 ASSERT(whichfork == XFS_ATTR_FORK);
897 return 0; 896 return;
898 } 897 }
899 cp = XFS_DFORK_PTR(dip, whichfork); 898 cp = XFS_DFORK_PTR(dip, whichfork);
900 mp = ip->i_mount; 899 mp = ip->i_mount;
901 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 900 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
902 case XFS_DINODE_FMT_LOCAL: 901 case XFS_DINODE_FMT_LOCAL:
903 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
904 error = xfs_dir2_sf_verify(mp,
905 (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
906 ifp->if_bytes);
907 if (error)
908 return error;
909 }
910 if ((iip->ili_fields & dataflag[whichfork]) && 902 if ((iip->ili_fields & dataflag[whichfork]) &&
911 (ifp->if_bytes > 0)) { 903 (ifp->if_bytes > 0)) {
912 ASSERT(ifp->if_u1.if_data != NULL); 904 ASSERT(ifp->if_u1.if_data != NULL);
@@ -959,7 +951,6 @@ xfs_iflush_fork(
959 ASSERT(0); 951 ASSERT(0);
960 break; 952 break;
961 } 953 }
962 return 0;
963} 954}
964 955
965/* 956/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 132dc59fdde6..7fb8365326d1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); 140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
141 141
142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); 142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
143int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, 143void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
144 struct xfs_inode_log_item *, int); 144 struct xfs_inode_log_item *, int);
145void xfs_idestroy_fork(struct xfs_inode *, int); 145void xfs_idestroy_fork(struct xfs_inode *, int);
146void xfs_idata_realloc(struct xfs_inode *, int, int); 146void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8b75dcea5966..828532ce0adc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
1311 /* 1311 /*
1312 * Now that we've unmap all full blocks we'll have to zero out any 1312 * Now that we've unmap all full blocks we'll have to zero out any
1313 * partial block at the beginning and/or end. xfs_zero_range is 1313 * partial block at the beginning and/or end. xfs_zero_range is
1314 * smart enough to skip any holes, including those we just created. 1314 * smart enough to skip any holes, including those we just created,
1315 * but we must take care not to zero beyond EOF and enlarge i_size.
1315 */ 1316 */
1317
1318 if (offset >= XFS_ISIZE(ip))
1319 return 0;
1320
1321 if (offset + len > XFS_ISIZE(ip))
1322 len = XFS_ISIZE(ip) - offset;
1323
1316 return xfs_zero_range(ip, offset, len, NULL); 1324 return xfs_zero_range(ip, offset, len, NULL);
1317} 1325}
1318 1326
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c7fe2c2123ab..7605d8396596 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
50#include "xfs_log.h" 50#include "xfs_log.h"
51#include "xfs_bmap_btree.h" 51#include "xfs_bmap_btree.h"
52#include "xfs_reflink.h" 52#include "xfs_reflink.h"
53#include "xfs_dir2_priv.h"
53 54
54kmem_zone_t *xfs_inode_zone; 55kmem_zone_t *xfs_inode_zone;
55 56
@@ -3475,7 +3476,6 @@ xfs_iflush_int(
3475 struct xfs_inode_log_item *iip = ip->i_itemp; 3476 struct xfs_inode_log_item *iip = ip->i_itemp;
3476 struct xfs_dinode *dip; 3477 struct xfs_dinode *dip;
3477 struct xfs_mount *mp = ip->i_mount; 3478 struct xfs_mount *mp = ip->i_mount;
3478 int error;
3479 3479
3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3481 ASSERT(xfs_isiflocked(ip)); 3481 ASSERT(xfs_isiflocked(ip));
@@ -3547,6 +3547,12 @@ xfs_iflush_int(
3547 if (ip->i_d.di_version < 3) 3547 if (ip->i_d.di_version < 3)
3548 ip->i_d.di_flushiter++; 3548 ip->i_d.di_flushiter++;
3549 3549
3550 /* Check the inline directory data. */
3551 if (S_ISDIR(VFS_I(ip)->i_mode) &&
3552 ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
3553 xfs_dir2_sf_verify(ip))
3554 goto corrupt_out;
3555
3550 /* 3556 /*
3551 * Copy the dirty parts of the inode into the on-disk inode. We always 3557 * Copy the dirty parts of the inode into the on-disk inode. We always
3552 * copy out the core of the inode, because if the inode is dirty at all 3558 * copy out the core of the inode, because if the inode is dirty at all
@@ -3558,14 +3564,9 @@ xfs_iflush_int(
3558 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3564 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3559 ip->i_d.di_flushiter = 0; 3565 ip->i_d.di_flushiter = 0;
3560 3566
3561 error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3567 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3562 if (error) 3568 if (XFS_IFORK_Q(ip))
3563 return error; 3569 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3564 if (XFS_IFORK_Q(ip)) {
3565 error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3566 if (error)
3567 return error;
3568 }
3569 xfs_inobp_check(mp, bp); 3570 xfs_inobp_check(mp, bp);
3570 3571
3571 /* 3572 /*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 229cc6a6d8ef..ebfc13350f9a 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -516,6 +516,20 @@ xfs_vn_getattr(
516 stat->blocks = 516 stat->blocks =
517 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); 517 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
518 518
519 if (ip->i_d.di_version == 3) {
520 if (request_mask & STATX_BTIME) {
521 stat->result_mask |= STATX_BTIME;
522 stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
523 stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
524 }
525 }
526
527 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
528 stat->attributes |= STATX_ATTR_IMMUTABLE;
529 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
530 stat->attributes |= STATX_ATTR_APPEND;
531 if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
532 stat->attributes |= STATX_ATTR_NODUMP;
519 533
520 switch (inode->i_mode & S_IFMT) { 534 switch (inode->i_mode & S_IFMT) {
521 case S_IFBLK: 535 case S_IFBLK:
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2a6d9b1558e0..26d67ce3c18d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -583,7 +583,7 @@ xfs_inumbers(
583 return error; 583 return error;
584 584
585 bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); 585 bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
586 buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); 586 buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
587 do { 587 do {
588 struct xfs_inobt_rec_incore r; 588 struct xfs_inobt_rec_incore r;
589 int stat; 589 int stat;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4df64a1fc09e..532372c6cf15 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,8 +14,8 @@
14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
15 * and/or .init.* sections. 15 * and/or .init.* sections.
16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections
17 * [__start_data_ro_after_init, __end_data_ro_after_init]: 17 * [__start_ro_after_init, __end_ro_after_init]:
18 * contains data.ro_after_init section 18 * contains .data..ro_after_init section
19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
20 * may be out of this range on some architectures. 20 * may be out of this range on some architectures.
21 * [_sinittext, _einittext]: contains .init.text.* sections 21 * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
33extern char __bss_start[], __bss_stop[]; 33extern char __bss_start[], __bss_stop[];
34extern char __init_begin[], __init_end[]; 34extern char __init_begin[], __init_end[];
35extern char _sinittext[], _einittext[]; 35extern char _sinittext[], _einittext[];
36extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; 36extern char __start_ro_after_init[], __end_ro_after_init[];
37extern char _end[]; 37extern char _end[];
38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
39extern char __kprobes_text_start[], __kprobes_text_end[]; 39extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0968d13b3885..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -173,6 +173,7 @@
173 KEEP(*(__##name##_of_table_end)) 173 KEEP(*(__##name##_of_table_end))
174 174
175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) 175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
176#define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
176#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) 177#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
177#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) 178#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
178#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) 179#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
@@ -260,9 +261,9 @@
260 */ 261 */
261#ifndef RO_AFTER_INIT_DATA 262#ifndef RO_AFTER_INIT_DATA
262#define RO_AFTER_INIT_DATA \ 263#define RO_AFTER_INIT_DATA \
263 __start_data_ro_after_init = .; \ 264 VMLINUX_SYMBOL(__start_ro_after_init) = .; \
264 *(.data..ro_after_init) \ 265 *(.data..ro_after_init) \
265 __end_data_ro_after_init = .; 266 VMLINUX_SYMBOL(__end_ro_after_init) = .;
266#endif 267#endif
267 268
268/* 269/*
@@ -559,6 +560,7 @@
559 CLK_OF_TABLES() \ 560 CLK_OF_TABLES() \
560 RESERVEDMEM_OF_TABLES() \ 561 RESERVEDMEM_OF_TABLES() \
561 CLKSRC_OF_TABLES() \ 562 CLKSRC_OF_TABLES() \
563 CLKEVT_OF_TABLES() \
562 IOMMU_OF_TABLES() \ 564 IOMMU_OF_TABLES() \
563 CPU_METHOD_OF_TABLES() \ 565 CPU_METHOD_OF_TABLES() \
564 CPUIDLE_METHOD_OF_TABLES() \ 566 CPUIDLE_METHOD_OF_TABLES() \
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 1d4f365d8f03..f6d9af3efa45 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
166 return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); 166 return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
167} 167}
168 168
169static inline void ahash_request_complete(struct ahash_request *req, int err)
170{
171 req->base.complete(&req->base, err);
172}
173
174static inline u32 ahash_request_flags(struct ahash_request *req)
175{
176 return req->base.flags;
177}
178
169static inline struct crypto_ahash *crypto_spawn_ahash( 179static inline struct crypto_ahash *crypto_spawn_ahash(
170 struct crypto_ahash_spawn *spawn) 180 struct crypto_ahash_spawn *spawn)
171{ 181{
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
229 * @ref_type: The type of reference. 229 * @ref_type: The type of reference.
230 * @existed: Upon completion, indicates that an identical reference object 230 * @existed: Upon completion, indicates that an identical reference object
231 * already existed, and the refcount was upped on that object instead. 231 * already existed, and the refcount was upped on that object instead.
232 * @require_existed: Fail with -EPERM if an identical ref object didn't
233 * already exist.
232 * 234 *
233 * Checks that the base object is shareable and adds a ref object to it. 235 * Checks that the base object is shareable and adds a ref object to it.
234 * 236 *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
243 */ 245 */
244extern int ttm_ref_object_add(struct ttm_object_file *tfile, 246extern int ttm_ref_object_add(struct ttm_object_file *tfile,
245 struct ttm_base_object *base, 247 struct ttm_base_object *base,
246 enum ttm_ref_type ref_type, bool *existed); 248 enum ttm_ref_type ref_type, bool *existed,
249 bool require_existed);
247 250
248extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, 251extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
249 struct ttm_base_object *base); 252 struct ttm_base_object *base);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); 295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
296int kvm_vgic_map_resources(struct kvm *kvm); 296int kvm_vgic_map_resources(struct kvm *kvm);
297int kvm_vgic_hyp_init(void); 297int kvm_vgic_hyp_init(void);
298void kvm_vgic_init_cpu_hardware(void);
298 299
299int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, 300int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
300 bool level); 301 bool level);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..9382c5da7a2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
51 51
52 atomic_t nr_active; 52 atomic_t nr_active;
53 53
54 struct delayed_work delayed_run_work;
54 struct delayed_work delay_work; 55 struct delayed_work delay_work;
55 56
56 struct hlist_node cpuhp_dead; 57 struct hlist_node cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
238void blk_mq_start_hw_queues(struct request_queue *q); 239void blk_mq_start_hw_queues(struct request_queue *q);
239void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 240void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
240void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); 241void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
242void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
241void blk_mq_run_hw_queues(struct request_queue *q, bool async); 243void blk_mq_run_hw_queues(struct request_queue *q, bool async);
242void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 244void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
243void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, 245void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..01a696b0a4d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
610#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ 610#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
611#define QUEUE_FLAG_DAX 26 /* device supports DAX */ 611#define QUEUE_FLAG_DAX 26 /* device supports DAX */
612#define QUEUE_FLAG_STATS 27 /* track rq completion times */ 612#define QUEUE_FLAG_STATS 27 /* track rq completion times */
613#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
614 613
615#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 614#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
616 (1 << QUEUE_FLAG_STACKABLE) | \ 615 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
1673 return true; 1672 return true;
1674} 1673}
1675 1674
1676static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 1675static inline bool bio_will_gap(struct request_queue *q,
1677 struct bio *next) 1676 struct request *prev_rq,
1677 struct bio *prev,
1678 struct bio *next)
1678{ 1679{
1679 if (bio_has_data(prev) && queue_virt_boundary(q)) { 1680 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1680 struct bio_vec pb, nb; 1681 struct bio_vec pb, nb;
1681 1682
1683 /*
1684 * don't merge if the 1st bio starts with non-zero
1685 * offset, otherwise it is quite difficult to respect
1686 * sg gap limit. We work hard to merge a huge number of small
1687 * single bios in case of mkfs.
1688 */
1689 if (prev_rq)
1690 bio_get_first_bvec(prev_rq->bio, &pb);
1691 else
1692 bio_get_first_bvec(prev, &pb);
1693 if (pb.bv_offset)
1694 return true;
1695
1696 /*
1697 * We don't need to worry about the situation that the
1698 * merged segment ends in unaligned virt boundary:
1699 *
1700 * - if 'pb' ends aligned, the merged segment ends aligned
1701 * - if 'pb' ends unaligned, the next bio must include
1702 * one single bvec of 'nb', otherwise the 'nb' can't
1703 * merge with 'pb'
1704 */
1682 bio_get_last_bvec(prev, &pb); 1705 bio_get_last_bvec(prev, &pb);
1683 bio_get_first_bvec(next, &nb); 1706 bio_get_first_bvec(next, &nb);
1684 1707
@@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1691 1714
1692static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 1715static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1693{ 1716{
1694 return bio_will_gap(req->q, req->biotail, bio); 1717 return bio_will_gap(req->q, req, req->biotail, bio);
1695} 1718}
1696 1719
1697static inline bool req_gap_front_merge(struct request *req, struct bio *bio) 1720static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1698{ 1721{
1699 return bio_will_gap(req->q, bio, req->bio); 1722 return bio_will_gap(req->q, NULL, bio, req->bio);
1700} 1723}
1701 1724
1702int kblockd_schedule_work(struct work_struct *work); 1725int kblockd_schedule_work(struct work_struct *work);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c71dd8fa5764..c41b8d99dd0e 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@ enum ccp_engine {
556 * struct ccp_cmd - CCP operation request 556 * struct ccp_cmd - CCP operation request
557 * @entry: list element (ccp driver use only) 557 * @entry: list element (ccp driver use only)
558 * @work: work element used for callbacks (ccp driver use only) 558 * @work: work element used for callbacks (ccp driver use only)
559 * @ccp: CCP device to be run on (ccp driver use only) 559 * @ccp: CCP device to be run on
560 * @ret: operation return code (ccp driver use only) 560 * @ret: operation return code (ccp driver use only)
561 * @flags: cmd processing flags 561 * @flags: cmd processing flags
562 * @engine: CCP operation to perform 562 * @engine: CCP operation to perform
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f6b43fbb141c..af9c86e958bd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
570 pr_cont_kernfs_path(cgrp->kn); 570 pr_cont_kernfs_path(cgrp->kn);
571} 571}
572 572
573static inline void cgroup_init_kthreadd(void)
574{
575 /*
576 * kthreadd is inherited by all kthreads, keep it in the root so
577 * that the new kthreads are guaranteed to stay in the root until
578 * initialization is finished.
579 */
580 current->no_cgroup_migration = 1;
581}
582
583static inline void cgroup_kthread_ready(void)
584{
585 /*
586 * This kthread finished initialization. The creator should have
587 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
588 */
589 current->no_cgroup_migration = 0;
590}
591
573#else /* !CONFIG_CGROUPS */ 592#else /* !CONFIG_CGROUPS */
574 593
575struct cgroup_subsys_state; 594struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
590 609
591static inline int cgroup_init_early(void) { return 0; } 610static inline int cgroup_init_early(void) { return 0; }
592static inline int cgroup_init(void) { return 0; } 611static inline int cgroup_init(void) { return 0; }
612static inline void cgroup_init_kthreadd(void) {}
613static inline void cgroup_kthread_ready(void) {}
593 614
594static inline bool task_under_cgroup_hierarchy(struct task_struct *task, 615static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
595 struct cgroup *ancestor) 616 struct cgroup *ancestor)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 5d3053c34fb3..6d7edc3082f9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
229 229
230#ifdef CONFIG_CLKEVT_PROBE 230#ifdef CONFIG_CLKEVT_PROBE
231extern int clockevent_probe(void); 231extern int clockevent_probe(void);
232#els 232#else
233static inline int clockevent_probe(void) { return 0; } 233static inline int clockevent_probe(void) { return 0; }
234#endif 234#endif
235 235
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..22d39e8d4de1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
211extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); 211extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
212 212
213extern int elevator_init(struct request_queue *, char *); 213extern int elevator_init(struct request_queue *, char *);
214extern void elevator_exit(struct elevator_queue *); 214extern void elevator_exit(struct request_queue *, struct elevator_queue *);
215extern int elevator_change(struct request_queue *, const char *); 215extern int elevator_change(struct request_queue *, const char *);
216extern bool elv_bio_merge_ok(struct request *, struct bio *); 216extern bool elv_bio_merge_ok(struct request *, struct bio *);
217extern struct elevator_queue *elevator_alloc(struct request_queue *, 217extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 9ca23fcfb5d7..6fdfc884fdeb 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -20,6 +20,8 @@ struct sock_exterr_skb {
20 struct sock_extended_err ee; 20 struct sock_extended_err ee;
21 u16 addr_offset; 21 u16 addr_offset;
22 __be16 port; 22 __be16 port;
23 u8 opt_stats:1,
24 unused:7;
23}; 25};
24 26
25#endif 27#endif
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 547f81592ba1..10c1abfbac6c 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -87,7 +87,6 @@ struct fscrypt_operations {
87 unsigned int flags; 87 unsigned int flags;
88 const char *key_prefix; 88 const char *key_prefix;
89 int (*get_context)(struct inode *, void *, size_t); 89 int (*get_context)(struct inode *, void *, size_t);
90 int (*prepare_context)(struct inode *);
91 int (*set_context)(struct inode *, const void *, size_t, void *); 90 int (*set_context)(struct inode *, const void *, size_t, void *);
92 int (*dummy_context)(struct inode *); 91 int (*dummy_context)(struct inode *);
93 bool (*is_encrypted)(struct inode *); 92 bool (*is_encrypted)(struct inode *);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 2484b2fcc6eb..933d93656605 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
143 struct fwnode_handle *child, 143 struct fwnode_handle *child,
144 enum gpiod_flags flags, 144 enum gpiod_flags flags,
145 const char *label); 145 const char *label);
146/* FIXME: delete this helper when users are switched over */
147static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
148 const char *con_id, struct fwnode_handle *child)
149{
150 return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
151 0, child,
152 GPIOD_ASIS,
153 "?");
154}
155 146
156#else /* CONFIG_GPIOLIB */ 147#else /* CONFIG_GPIOLIB */
157 148
@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
444 return ERR_PTR(-ENOSYS); 435 return ERR_PTR(-ENOSYS);
445} 436}
446 437
447/* FIXME: delete this when all users are switched over */
448static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
449 const char *con_id, struct fwnode_handle *child)
450{
451 return ERR_PTR(-ENOSYS);
452}
453
454#endif /* CONFIG_GPIOLIB */ 438#endif /* CONFIG_GPIOLIB */
455 439
456static inline 440static inline
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 78d59dba563e..88b673749121 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
88#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) 88#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
89#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) 89#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
90#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) 90#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
91#define HWMON_T_ALARM BIT(hwmon_temp_alarm)
91#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) 92#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
92#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) 93#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
93#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) 94#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62bbf3c1aa4a..970771a5f739 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -845,6 +845,13 @@ struct vmbus_channel {
845 * link up channels based on their CPU affinity. 845 * link up channels based on their CPU affinity.
846 */ 846 */
847 struct list_head percpu_list; 847 struct list_head percpu_list;
848
849 /*
850 * Defer freeing channel until after all cpu's have
851 * gone through grace period.
852 */
853 struct rcu_head rcu;
854
848 /* 855 /*
849 * For performance critical channels (storage, networking 856 * For performance critical channels (storage, networking
850 * etc,), Hyper-V has a mechanism to enhance the throughput 857 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1430 const int *srv_version, int srv_vercnt, 1437 const int *srv_version, int srv_vercnt,
1431 int *nego_fw_version, int *nego_srv_version); 1438 int *nego_fw_version, int *nego_srv_version);
1432 1439
1433void hv_event_tasklet_disable(struct vmbus_channel *channel);
1434void hv_event_tasklet_enable(struct vmbus_channel *channel);
1435
1436void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1440void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1437 1441
1438void vmbus_setevent(struct vmbus_channel *channel); 1442void vmbus_setevent(struct vmbus_channel *channel);
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca41515527..fa7931933067 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
62 const char *name, 62 const char *name,
63 struct config_item_type *type) 63 struct config_item_type *type)
64{ 64{
65#ifdef CONFIG_CONFIGFS_FS 65#if IS_ENABLED(CONFIG_CONFIGFS_FS)
66 config_group_init_type_name(&d->group, name, type); 66 config_group_init_type_name(&d->group, name, type);
67#endif 67#endif
68} 68}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6a6de187ddc0..2e4de0deee53 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -125,9 +125,16 @@ enum iommu_attr {
125}; 125};
126 126
127/* These are the possible reserved region types */ 127/* These are the possible reserved region types */
128#define IOMMU_RESV_DIRECT (1 << 0) 128enum iommu_resv_type {
129#define IOMMU_RESV_RESERVED (1 << 1) 129 /* Memory regions which must be mapped 1:1 at all times */
130#define IOMMU_RESV_MSI (1 << 2) 130 IOMMU_RESV_DIRECT,
131 /* Arbitrary "never map this or give it to a device" address ranges */
132 IOMMU_RESV_RESERVED,
133 /* Hardware MSI region (untranslated) */
134 IOMMU_RESV_MSI,
135 /* Software-managed MSI translation window */
136 IOMMU_RESV_SW_MSI,
137};
131 138
132/** 139/**
133 * struct iommu_resv_region - descriptor for a reserved memory region 140 * struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
142 phys_addr_t start; 149 phys_addr_t start;
143 size_t length; 150 size_t length;
144 int prot; 151 int prot;
145 int type; 152 enum iommu_resv_type type;
146}; 153};
147 154
148#ifdef CONFIG_IOMMU_API 155#ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
288extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); 295extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
289extern int iommu_request_dm_for_dev(struct device *dev); 296extern int iommu_request_dm_for_dev(struct device *dev);
290extern struct iommu_resv_region * 297extern struct iommu_resv_region *
291iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); 298iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
299 enum iommu_resv_type type);
292extern int iommu_get_group_resv_regions(struct iommu_group *group, 300extern int iommu_get_group_resv_regions(struct iommu_group *group,
293 struct list_head *head); 301 struct list_head *head);
294 302
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
96#define GICH_MISR_EOI (1 << 0) 96#define GICH_MISR_EOI (1 << 0)
97#define GICH_MISR_U (1 << 1) 97#define GICH_MISR_U (1 << 1)
98 98
99#define GICV_PMR_PRIORITY_SHIFT 3
100#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
101
99#ifndef __ASSEMBLY__ 102#ifndef __ASSEMBLY__
100 103
101#include <linux/irqdomain.h> 104#include <linux/irqdomain.h>
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5734480c9590..a5c7046f26b4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -76,6 +76,9 @@ size_t ksize(const void *);
76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } 76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
77size_t kasan_metadata_size(struct kmem_cache *cache); 77size_t kasan_metadata_size(struct kmem_cache *cache);
78 78
79bool kasan_save_enable_multi_shot(void);
80void kasan_restore_multi_shot(bool enabled);
81
79#else /* CONFIG_KASAN */ 82#else /* CONFIG_KASAN */
80 83
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 84static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c14ad9809da..d0250744507a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
162 int len, void *val); 162 int len, void *val);
163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
164 int len, struct kvm_io_device *dev); 164 int len, struct kvm_io_device *dev);
165int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 165void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
166 struct kvm_io_device *dev); 166 struct kvm_io_device *dev);
167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
168 gpa_t addr); 168 gpa_t addr);
169 169
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5af377303880..bb7250c45cb8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
740 return false; 740 return false;
741} 741}
742 742
743static inline void mem_cgroup_update_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx,
745 int nr)
746{
747}
748
743static inline void mem_cgroup_inc_page_stat(struct page *page, 749static inline void mem_cgroup_inc_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx) 750 enum mem_cgroup_stat_index idx)
745{ 751{
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..3eef9fb9968a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
35 * Max bus-specific overhead incurred by request/responses. 35 * Max bus-specific overhead incurred by request/responses.
36 * I2C requires 1 additional byte for requests. 36 * I2C requires 1 additional byte for requests.
37 * I2C requires 2 additional bytes for responses. 37 * I2C requires 2 additional bytes for responses.
38 * SPI requires up to 32 additional bytes for responses.
38 * */ 39 * */
39#define EC_PROTO_VERSION_UNKNOWN 0 40#define EC_PROTO_VERSION_UNKNOWN 0
40#define EC_MAX_REQUEST_OVERHEAD 1 41#define EC_MAX_REQUEST_OVERHEAD 1
41#define EC_MAX_RESPONSE_OVERHEAD 2 42#define EC_MAX_RESPONSE_OVERHEAD 32
42 43
43/* 44/*
44 * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 45 * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7e66e4f62858..1beb1ec2fbdf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,6 +476,7 @@ enum {
476enum { 476enum {
477 MLX4_INTERFACE_STATE_UP = 1 << 0, 477 MLX4_INTERFACE_STATE_UP = 1 << 0,
478 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 478 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
479 MLX4_INTERFACE_STATE_NOWAIT = 1 << 2,
479}; 480};
480 481
481#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 482#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f01c88f0800..00a8fa7e366a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -32,6 +32,8 @@ struct user_struct;
32struct writeback_control; 32struct writeback_control;
33struct bdi_writeback; 33struct bdi_writeback;
34 34
35void init_mm_internals(void);
36
35#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 37#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
36extern unsigned long max_mapnr; 38extern unsigned long max_mapnr;
37 39
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index aab032a6ae61..97ca105347a6 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -53,7 +53,7 @@ struct sdio_func {
53 unsigned int state; /* function state */ 53 unsigned int state; /* function state */
54#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ 54#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
55 55
56 u8 tmpbuf[4]; /* DMA:able scratch buffer */ 56 u8 *tmpbuf; /* DMA:able scratch buffer */
57 57
58 unsigned num_info; /* number of info strings */ 58 unsigned num_info; /* number of info strings */
59 const char **info; /* info strings */ 59 const char **info; /* info strings */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 51891fb0d3ce..c91b3bcd158f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
394 ___pud; \ 394 ___pud; \
395}) 395})
396 396
397#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
398({ \
399 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
400 pmd_t ___pmd; \
401 \
402 ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
403 mmu_notifier_invalidate_range(__mm, ___haddr, \
404 ___haddr + HPAGE_PMD_SIZE); \
405 \
406 ___pmd; \
407})
408
409/* 397/*
410 * set_pte_at_notify() sets the pte _after_ running the notifier. 398 * set_pte_at_notify() sets the pte _after_ running the notifier.
411 * This is safe to start by updating the secondary MMUs, because the primary MMU 399 * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
489#define ptep_clear_flush_notify ptep_clear_flush 477#define ptep_clear_flush_notify ptep_clear_flush
490#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush 478#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
491#define pudp_huge_clear_flush_notify pudp_huge_clear_flush 479#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
492#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
493#define set_pte_at_notify set_pte_at 480#define set_pte_at_notify set_pte_at
494 481
495#endif /* CONFIG_MMU_NOTIFIER */ 482#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..9061780b141f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
64 * RDMA_QPTYPE field 64 * RDMA_QPTYPE field
65 */ 65 */
66enum { 66enum {
67 NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ 67 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
68 NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ 68 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
69}; 69};
70 70
71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS 71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
72 * RDMA_QPTYPE field 72 * RDMA_QPTYPE field
73 */ 73 */
74enum { 74enum {
75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ 75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
76 NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ 76 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
77 NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ 77 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
78 NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ 78 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
79 NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ 79 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
80}; 80};
81 81
82/* RDMA Connection Management Service Type codes for Discovery Log Page 82/* RDMA Connection Management Service Type codes for Discovery Log Page
83 * entry TSAS RDMA_CMS field 83 * entry TSAS RDMA_CMS field
84 */ 84 */
85enum { 85enum {
86 NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ 86 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
87}; 87};
88 88
89#define NVMF_AQ_DEPTH 32 89#define NVMF_AQ_DEPTH 32
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7a4948..fd0de00c0d77 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -76,22 +76,12 @@ struct gpmc_timings;
76struct omap_nand_platform_data; 76struct omap_nand_platform_data;
77struct omap_onenand_platform_data; 77struct omap_onenand_platform_data;
78 78
79#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
80extern int gpmc_nand_init(struct omap_nand_platform_data *d,
81 struct gpmc_timings *gpmc_t);
82#else
83static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
84 struct gpmc_timings *gpmc_t)
85{
86 return 0;
87}
88#endif
89
90#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) 79#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
91extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); 80extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
92#else 81#else
93#define board_onenand_data NULL 82#define board_onenand_data NULL
94static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) 83static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
95{ 84{
85 return 0;
96} 86}
97#endif 87#endif
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
145extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 145extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
146 struct device *dev, void *driver_data, 146 struct device *dev, void *driver_data,
147 struct pinctrl_dev **pctldev); 147 struct pinctrl_dev **pctldev);
148extern int pinctrl_enable(struct pinctrl_dev *pctldev);
148 149
149/* Please use pinctrl_register_and_init() instead */ 150/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
150extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, 151extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
151 struct device *dev, void *driver_data); 152 struct device *dev, void *driver_data);
152 153
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 86b4ed75359e..13d8681210d5 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
15struct reset_control *__of_reset_control_get(struct device_node *node, 15struct reset_control *__of_reset_control_get(struct device_node *node,
16 const char *id, int index, bool shared, 16 const char *id, int index, bool shared,
17 bool optional); 17 bool optional);
18struct reset_control *__reset_control_get(struct device *dev, const char *id,
19 int index, bool shared,
20 bool optional);
18void reset_control_put(struct reset_control *rstc); 21void reset_control_put(struct reset_control *rstc);
19struct reset_control *__devm_reset_control_get(struct device *dev, 22struct reset_control *__devm_reset_control_get(struct device *dev,
20 const char *id, int index, bool shared, 23 const char *id, int index, bool shared,
@@ -31,31 +34,26 @@ static inline int device_reset_optional(struct device *dev)
31 34
32static inline int reset_control_reset(struct reset_control *rstc) 35static inline int reset_control_reset(struct reset_control *rstc)
33{ 36{
34 WARN_ON(1);
35 return 0; 37 return 0;
36} 38}
37 39
38static inline int reset_control_assert(struct reset_control *rstc) 40static inline int reset_control_assert(struct reset_control *rstc)
39{ 41{
40 WARN_ON(1);
41 return 0; 42 return 0;
42} 43}
43 44
44static inline int reset_control_deassert(struct reset_control *rstc) 45static inline int reset_control_deassert(struct reset_control *rstc)
45{ 46{
46 WARN_ON(1);
47 return 0; 47 return 0;
48} 48}
49 49
50static inline int reset_control_status(struct reset_control *rstc) 50static inline int reset_control_status(struct reset_control *rstc)
51{ 51{
52 WARN_ON(1);
53 return 0; 52 return 0;
54} 53}
55 54
56static inline void reset_control_put(struct reset_control *rstc) 55static inline void reset_control_put(struct reset_control *rstc)
57{ 56{
58 WARN_ON(1);
59} 57}
60 58
61static inline int __must_check device_reset(struct device *dev) 59static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +72,21 @@ static inline struct reset_control *__of_reset_control_get(
74 const char *id, int index, bool shared, 72 const char *id, int index, bool shared,
75 bool optional) 73 bool optional)
76{ 74{
77 return ERR_PTR(-ENOTSUPP); 75 return optional ? NULL : ERR_PTR(-ENOTSUPP);
76}
77
78static inline struct reset_control *__reset_control_get(
79 struct device *dev, const char *id,
80 int index, bool shared, bool optional)
81{
82 return optional ? NULL : ERR_PTR(-ENOTSUPP);
78} 83}
79 84
80static inline struct reset_control *__devm_reset_control_get( 85static inline struct reset_control *__devm_reset_control_get(
81 struct device *dev, const char *id, 86 struct device *dev, const char *id,
82 int index, bool shared, bool optional) 87 int index, bool shared, bool optional)
83{ 88{
84 return ERR_PTR(-ENOTSUPP); 89 return optional ? NULL : ERR_PTR(-ENOTSUPP);
85} 90}
86 91
87#endif /* CONFIG_RESET_CONTROLLER */ 92#endif /* CONFIG_RESET_CONTROLLER */
@@ -107,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
107#ifndef CONFIG_RESET_CONTROLLER 112#ifndef CONFIG_RESET_CONTROLLER
108 WARN_ON(1); 113 WARN_ON(1);
109#endif 114#endif
110 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 115 return __reset_control_get(dev, id, 0, false, false);
111 false);
112} 116}
113 117
114/** 118/**
@@ -136,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
136static inline struct reset_control *reset_control_get_shared( 140static inline struct reset_control *reset_control_get_shared(
137 struct device *dev, const char *id) 141 struct device *dev, const char *id)
138{ 142{
139 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 143 return __reset_control_get(dev, id, 0, true, false);
140 false);
141} 144}
142 145
143static inline struct reset_control *reset_control_get_optional_exclusive( 146static inline struct reset_control *reset_control_get_optional_exclusive(
144 struct device *dev, const char *id) 147 struct device *dev, const char *id)
145{ 148{
146 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 149 return __reset_control_get(dev, id, 0, false, true);
147 true);
148} 150}
149 151
150static inline struct reset_control *reset_control_get_optional_shared( 152static inline struct reset_control *reset_control_get_optional_shared(
151 struct device *dev, const char *id) 153 struct device *dev, const char *id)
152{ 154{
153 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 155 return __reset_control_get(dev, id, 0, true, true);
154 true);
155} 156}
156 157
157/** 158/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..4cf9a59a4d08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,10 @@ struct task_struct {
604#ifdef CONFIG_COMPAT_BRK 604#ifdef CONFIG_COMPAT_BRK
605 unsigned brk_randomized:1; 605 unsigned brk_randomized:1;
606#endif 606#endif
607#ifdef CONFIG_CGROUPS
608 /* disallow userland-initiated cgroup migration */
609 unsigned no_cgroup_migration:1;
610#endif
607 611
608 unsigned long atomic_flags; /* Flags requiring atomic access. */ 612 unsigned long atomic_flags; /* Flags requiring atomic access. */
609 613
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 4a68c6791207..34fe92ce1ebd 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
54} 54}
55#else 55#else
56extern void sched_clock_init_late(void); 56extern void sched_clock_init_late(void);
57/*
58 * Architectures can set this to 1 if they have specified
59 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
60 * but then during bootup it turns out that sched_clock()
61 * is reliable after all:
62 */
63extern int sched_clock_stable(void); 57extern int sched_clock_stable(void);
64extern void clear_sched_clock_stable(void); 58extern void clear_sched_clock_stable(void);
65 59
60/*
61 * When sched_clock_stable(), __sched_clock_offset provides the offset
62 * between local_clock() and sched_clock().
63 */
64extern u64 __sched_clock_offset;
65
66
66extern void sched_clock_tick(void); 67extern void sched_clock_tick(void);
67extern void sched_clock_idle_sleep_event(void); 68extern void sched_clock_idle_sleep_event(void);
68extern void sched_clock_idle_wakeup_event(u64 delta_ns); 69extern void sched_clock_idle_wakeup_event(u64 delta_ns);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
26 unsigned int nlink; 26 unsigned int nlink;
27 uint32_t blksize; /* Preferred I/O size */ 27 uint32_t blksize; /* Preferred I/O size */
28 u64 attributes; 28 u64 attributes;
29 u64 attributes_mask;
29#define KSTAT_ATTR_FS_IOC_FLAGS \ 30#define KSTAT_ATTR_FS_IOC_FLAGS \
30 (STATX_ATTR_COMPRESSED | \ 31 (STATX_ATTR_COMPRESSED | \
31 STATX_ATTR_IMMUTABLE | \ 32 STATX_ATTR_IMMUTABLE | \
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 804e34c6f981..f2d36a3d3005 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
39 }; 39 };
40 union { 40 union {
41 unsigned long nr_segs; 41 unsigned long nr_segs;
42 int idx; 42 struct {
43 int idx;
44 int start_idx;
45 };
43 }; 46 };
44}; 47};
45 48
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
81size_t iov_iter_copy_from_user_atomic(struct page *page, 84size_t iov_iter_copy_from_user_atomic(struct page *page,
82 struct iov_iter *i, unsigned long offset, size_t bytes); 85 struct iov_iter *i, unsigned long offset, size_t bytes);
83void iov_iter_advance(struct iov_iter *i, size_t bytes); 86void iov_iter_advance(struct iov_iter *i, size_t bytes);
87void iov_iter_revert(struct iov_iter *i, size_t bytes);
84int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); 88int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
85size_t iov_iter_single_seg_count(const struct iov_iter *i); 89size_t iov_iter_single_seg_count(const struct iov_iter *i);
86size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 90size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043dc34e4..de2a722fe3cf 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
50/* device can't handle Link Power Management */ 50/* device can't handle Link Power Management */
51#define USB_QUIRK_NO_LPM BIT(10) 51#define USB_QUIRK_NO_LPM BIT(10)
52 52
53/*
54 * Device reports its bInterval as linear frames instead of the
55 * USB 2.0 calculation.
56 */
57#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
58
53#endif /* __LINUX_USB_QUIRKS_H */ 59#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 04b0d3f95043..7edfbdb55a99 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -167,6 +167,7 @@ struct virtio_driver {
167 unsigned int feature_table_size; 167 unsigned int feature_table_size;
168 const unsigned int *feature_table_legacy; 168 const unsigned int *feature_table_legacy;
169 unsigned int feature_table_size_legacy; 169 unsigned int feature_table_size_legacy;
170 int (*validate)(struct virtio_device *dev);
170 int (*probe)(struct virtio_device *dev); 171 int (*probe)(struct virtio_device *dev);
171 void (*scan)(struct virtio_device *dev); 172 void (*scan)(struct virtio_device *dev);
172 void (*remove)(struct virtio_device *dev); 173 void (*remove)(struct virtio_device *dev);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfeb0d1f..584f9a647ad4 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
48 struct virtio_vsock_hdr hdr; 48 struct virtio_vsock_hdr hdr;
49 struct work_struct work; 49 struct work_struct work;
50 struct list_head list; 50 struct list_head list;
51 /* socket refcnt not held, only use for cancellation */
52 struct vsock_sock *vsk;
51 void *buf; 53 void *buf;
52 u32 len; 54 u32 len;
53 u32 off; 55 u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
56 58
57struct virtio_vsock_pkt_info { 59struct virtio_vsock_pkt_info {
58 u32 remote_cid, remote_port; 60 u32 remote_cid, remote_port;
61 struct vsock_sock *vsk;
59 struct msghdr *msg; 62 struct msghdr *msg;
60 u32 pkt_len; 63 u32 pkt_len;
61 u16 type; 64 u16 type;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f2758964ce6f..f32ed9ac181a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@ struct vsock_transport {
100 void (*destruct)(struct vsock_sock *); 100 void (*destruct)(struct vsock_sock *);
101 void (*release)(struct vsock_sock *); 101 void (*release)(struct vsock_sock *);
102 102
103 /* Cancel all pending packets sent on vsock. */
104 int (*cancel_pkt)(struct vsock_sock *vsk);
105
103 /* Connections. */ 106 /* Connections. */
104 int (*connect)(struct vsock_sock *); 107 int (*connect)(struct vsock_sock *);
105 108
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f540f9ad2af4..19605878da47 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
244 u32 seq); 244 u32 seq);
245 245
246/* Fake conntrack entry for untracked connections */ 246/* Fake conntrack entry for untracked connections */
247DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 247DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
248static inline struct nf_conn *nf_ct_untracked_get(void) 248static inline struct nf_conn *nf_ct_untracked_get(void)
249{ 249{
250 return raw_cpu_ptr(&nf_conntrack_untracked); 250 return raw_cpu_ptr(&nf_conntrack_untracked);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2aa8a9d80fbe..0136028652bd 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -103,6 +103,35 @@ struct nft_regs {
103 }; 103 };
104}; 104};
105 105
106/* Store/load an u16 or u8 integer to/from the u32 data register.
107 *
108 * Note, when using concatenations, register allocation happens at 32-bit
109 * level. So for store instruction, pad the rest part with zero to avoid
110 * garbage values.
111 */
112
113static inline void nft_reg_store16(u32 *dreg, u16 val)
114{
115 *dreg = 0;
116 *(u16 *)dreg = val;
117}
118
119static inline void nft_reg_store8(u32 *dreg, u8 val)
120{
121 *dreg = 0;
122 *(u8 *)dreg = val;
123}
124
125static inline u16 nft_reg_load16(u32 *sreg)
126{
127 return *(u16 *)sreg;
128}
129
130static inline u8 nft_reg_load8(u32 *sreg)
131{
132 return *(u8 *)sreg;
133}
134
106static inline void nft_data_copy(u32 *dst, const struct nft_data *src, 135static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
107 unsigned int len) 136 unsigned int len)
108{ 137{
@@ -203,7 +232,6 @@ struct nft_set_elem {
203struct nft_set; 232struct nft_set;
204struct nft_set_iter { 233struct nft_set_iter {
205 u8 genmask; 234 u8 genmask;
206 bool flush;
207 unsigned int count; 235 unsigned int count;
208 unsigned int skip; 236 unsigned int skip;
209 int err; 237 int err;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d150b5066201..97983d1c05e4 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
9 struct sk_buff *skb, 9 struct sk_buff *skb,
10 const struct nf_hook_state *state) 10 const struct nf_hook_state *state)
11{ 11{
12 unsigned int flags = IP6_FH_F_AUTH;
12 int protohdr, thoff = 0; 13 int protohdr, thoff = 0;
13 unsigned short frag_off; 14 unsigned short frag_off;
14 15
15 nft_set_pktinfo(pkt, skb, state); 16 nft_set_pktinfo(pkt, skb, state);
16 17
17 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 18 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
18 if (protohdr < 0) { 19 if (protohdr < 0) {
19 nft_set_pktinfo_proto_unspec(pkt, skb); 20 nft_set_pktinfo_proto_unspec(pkt, skb);
20 return; 21 return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
32 const struct nf_hook_state *state) 33 const struct nf_hook_state *state)
33{ 34{
34#if IS_ENABLED(CONFIG_IPV6) 35#if IS_ENABLED(CONFIG_IPV6)
36 unsigned int flags = IP6_FH_F_AUTH;
35 struct ipv6hdr *ip6h, _ip6h; 37 struct ipv6hdr *ip6h, _ip6h;
36 unsigned int thoff = 0; 38 unsigned int thoff = 0;
37 unsigned short frag_off; 39 unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
50 if (pkt_len + sizeof(*ip6h) > skb->len) 52 if (pkt_len + sizeof(*ip6h) > skb->len)
51 return -1; 53 return -1;
52 54
53 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 55 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
54 if (protohdr < 0) 56 if (protohdr < 0)
55 return -1; 57 return -1;
56 58
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1f71ee5ab518..069582ee5d7f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
448 return frag; 448 return frag;
449} 449}
450 450
451static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) 451static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
452{ 452{
453 453 sctp_assoc_sync_pmtu(asoc);
454 sctp_assoc_sync_pmtu(sk, asoc);
455 asoc->pmtu_pending = 0; 454 asoc->pmtu_pending = 0;
456} 455}
457 456
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
596 */ 595 */
597static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 596static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
598{ 597{
599 if (t->dst && (!dst_check(t->dst, t->dst_cookie) || 598 if (t->dst && !dst_check(t->dst, t->dst_cookie))
600 t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
601 SCTP_DEFAULT_MINSEGMENT)))
602 sctp_transport_dst_release(t); 599 sctp_transport_dst_release(t);
603 600
604 return t->dst; 601 return t->dst;
605} 602}
606 603
604static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
605{
606 __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
607 SCTP_DEFAULT_MINSEGMENT);
608
609 if (t->pathmtu == pmtu)
610 return true;
611
612 t->pathmtu = pmtu;
613
614 return false;
615}
616
607#endif /* __net_sctp_h__ */ 617#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 07a0b128625a..138f8615acf0 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -83,6 +83,7 @@ struct sctp_bind_addr;
83struct sctp_ulpq; 83struct sctp_ulpq;
84struct sctp_ep_common; 84struct sctp_ep_common;
85struct crypto_shash; 85struct crypto_shash;
86struct sctp_stream;
86 87
87 88
88#include <net/sctp/tsnmap.h> 89#include <net/sctp/tsnmap.h>
@@ -376,7 +377,8 @@ typedef struct sctp_sender_hb_info {
376 __u64 hb_nonce; 377 __u64 hb_nonce;
377} sctp_sender_hb_info_t; 378} sctp_sender_hb_info_t;
378 379
379struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); 380int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
381int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
380void sctp_stream_free(struct sctp_stream *stream); 382void sctp_stream_free(struct sctp_stream *stream);
381void sctp_stream_clear(struct sctp_stream *stream); 383void sctp_stream_clear(struct sctp_stream *stream);
382 384
@@ -498,7 +500,6 @@ struct sctp_datamsg {
498 /* Did the messenge fail to send? */ 500 /* Did the messenge fail to send? */
499 int send_error; 501 int send_error;
500 u8 send_failed:1, 502 u8 send_failed:1,
501 force_delay:1,
502 can_delay; /* should this message be Nagle delayed */ 503 can_delay; /* should this message be Nagle delayed */
503}; 504};
504 505
@@ -753,6 +754,8 @@ struct sctp_transport {
753 /* Is the Path MTU update pending on this tranport */ 754 /* Is the Path MTU update pending on this tranport */
754 pmtu_pending:1, 755 pmtu_pending:1,
755 756
757 dst_pending_confirm:1, /* need to confirm neighbour */
758
756 /* Has this transport moved the ctsn since we last sacked */ 759 /* Has this transport moved the ctsn since we last sacked */
757 sack_generation:1; 760 sack_generation:1;
758 u32 dst_cookie; 761 u32 dst_cookie;
@@ -806,8 +809,6 @@ struct sctp_transport {
806 809
807 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ 810 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */
808 811
809 __u32 dst_pending_confirm; /* need to confirm neighbour */
810
811 /* Destination */ 812 /* Destination */
812 struct dst_entry *dst; 813 struct dst_entry *dst;
813 /* Source address. */ 814 /* Source address. */
@@ -951,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
951void sctp_transport_burst_limited(struct sctp_transport *); 952void sctp_transport_burst_limited(struct sctp_transport *);
952void sctp_transport_burst_reset(struct sctp_transport *); 953void sctp_transport_burst_reset(struct sctp_transport *);
953unsigned long sctp_transport_timeout(struct sctp_transport *); 954unsigned long sctp_transport_timeout(struct sctp_transport *);
954void sctp_transport_reset(struct sctp_transport *); 955void sctp_transport_reset(struct sctp_transport *t);
955void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); 956void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
956void sctp_transport_immediate_rtx(struct sctp_transport *); 957void sctp_transport_immediate_rtx(struct sctp_transport *);
957void sctp_transport_dst_release(struct sctp_transport *t); 958void sctp_transport_dst_release(struct sctp_transport *t);
958void sctp_transport_dst_confirm(struct sctp_transport *t); 959void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1877,6 +1878,7 @@ struct sctp_association {
1877 1878
1878 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ 1879 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
1879 temp:1, /* Is it a temporary association? */ 1880 temp:1, /* Is it a temporary association? */
1881 force_delay:1,
1880 prsctp_enable:1, 1882 prsctp_enable:1,
1881 reconf_enable:1; 1883 reconf_enable:1;
1882 1884
@@ -1952,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old,
1952 1954
1953__u32 sctp_association_get_next_tsn(struct sctp_association *); 1955__u32 sctp_association_get_next_tsn(struct sctp_association *);
1954 1956
1955void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1957void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
1956void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1958void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
1957void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1959void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1958void sctp_assoc_set_primary(struct sctp_association *, 1960void sctp_assoc_set_primary(struct sctp_association *,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0f1813c13687..99e4423eb2b8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
1863}; 1863};
1864 1864
1865struct ib_device { 1865struct ib_device {
1866 /* Do not access @dma_device directly from ULP nor from HW drivers. */
1867 struct device *dma_device;
1868
1866 char name[IB_DEVICE_NAME_MAX]; 1869 char name[IB_DEVICE_NAME_MAX];
1867 1870
1868 struct list_head event_handler_list; 1871 struct list_head event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3007 */ 3010 */
3008static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3011static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3009{ 3012{
3010 return dma_mapping_error(&dev->dev, dma_addr); 3013 return dma_mapping_error(dev->dma_device, dma_addr);
3011} 3014}
3012 3015
3013/** 3016/**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
3021 void *cpu_addr, size_t size, 3024 void *cpu_addr, size_t size,
3022 enum dma_data_direction direction) 3025 enum dma_data_direction direction)
3023{ 3026{
3024 return dma_map_single(&dev->dev, cpu_addr, size, direction); 3027 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3025} 3028}
3026 3029
3027/** 3030/**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
3035 u64 addr, size_t size, 3038 u64 addr, size_t size,
3036 enum dma_data_direction direction) 3039 enum dma_data_direction direction)
3037{ 3040{
3038 dma_unmap_single(&dev->dev, addr, size, direction); 3041 dma_unmap_single(dev->dma_device, addr, size, direction);
3039} 3042}
3040 3043
3041/** 3044/**
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
3052 size_t size, 3055 size_t size,
3053 enum dma_data_direction direction) 3056 enum dma_data_direction direction)
3054{ 3057{
3055 return dma_map_page(&dev->dev, page, offset, size, direction); 3058 return dma_map_page(dev->dma_device, page, offset, size, direction);
3056} 3059}
3057 3060
3058/** 3061/**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
3066 u64 addr, size_t size, 3069 u64 addr, size_t size,
3067 enum dma_data_direction direction) 3070 enum dma_data_direction direction)
3068{ 3071{
3069 dma_unmap_page(&dev->dev, addr, size, direction); 3072 dma_unmap_page(dev->dma_device, addr, size, direction);
3070} 3073}
3071 3074
3072/** 3075/**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
3080 struct scatterlist *sg, int nents, 3083 struct scatterlist *sg, int nents,
3081 enum dma_data_direction direction) 3084 enum dma_data_direction direction)
3082{ 3085{
3083 return dma_map_sg(&dev->dev, sg, nents, direction); 3086 return dma_map_sg(dev->dma_device, sg, nents, direction);
3084} 3087}
3085 3088
3086/** 3089/**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
3094 struct scatterlist *sg, int nents, 3097 struct scatterlist *sg, int nents,
3095 enum dma_data_direction direction) 3098 enum dma_data_direction direction)
3096{ 3099{
3097 dma_unmap_sg(&dev->dev, sg, nents, direction); 3100 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3098} 3101}
3099 3102
3100static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3103static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3102 enum dma_data_direction direction, 3105 enum dma_data_direction direction,
3103 unsigned long dma_attrs) 3106 unsigned long dma_attrs)
3104{ 3107{
3105 return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3108 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3109 dma_attrs);
3106} 3110}
3107 3111
3108static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3112static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3110 enum dma_data_direction direction, 3114 enum dma_data_direction direction,
3111 unsigned long dma_attrs) 3115 unsigned long dma_attrs)
3112{ 3116{
3113 dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3117 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3114} 3118}
3115/** 3119/**
3116 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3120 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3152 size_t size, 3156 size_t size,
3153 enum dma_data_direction dir) 3157 enum dma_data_direction dir)
3154{ 3158{
3155 dma_sync_single_for_cpu(&dev->dev, addr, size, dir); 3159 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3156} 3160}
3157 3161
3158/** 3162/**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3167 size_t size, 3171 size_t size,
3168 enum dma_data_direction dir) 3172 enum dma_data_direction dir)
3169{ 3173{
3170 dma_sync_single_for_device(&dev->dev, addr, size, dir); 3174 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3171} 3175}
3172 3176
3173/** 3177/**
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3182 dma_addr_t *dma_handle, 3186 dma_addr_t *dma_handle,
3183 gfp_t flag) 3187 gfp_t flag)
3184{ 3188{
3185 return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); 3189 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3186} 3190}
3187 3191
3188/** 3192/**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
3196 size_t size, void *cpu_addr, 3200 size_t size, void *cpu_addr,
3197 dma_addr_t dma_handle) 3201 dma_addr_t dma_handle)
3198{ 3202{
3199 dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); 3203 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3200} 3204}
3201 3205
3202/** 3206/**
diff --git a/include/sound/cs35l35.h b/include/sound/cs35l35.h
new file mode 100644
index 000000000000..29da899e17e4
--- /dev/null
+++ b/include/sound/cs35l35.h
@@ -0,0 +1,108 @@
1/*
2 * linux/sound/cs35l35.h -- Platform data for CS35l35
3 *
4 * Copyright (c) 2016 Cirrus Logic Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __CS35L35_H
12#define __CS35L35_H
13
14struct classh_cfg {
15 /*
16 * Class H Algorithm Control Variables
17 * You can either have it done
18 * automatically or you can adjust
19 * these variables for tuning
20 *
21 * if you do not enable the internal algorithm
22 * you will get a set of mixer controls for
23 * Class H tuning
24 *
25 * Section 4.3 of the datasheet
26 */
27 bool classh_bst_override;
28 bool classh_algo_enable;
29 int classh_bst_max_limit;
30 int classh_mem_depth;
31 int classh_release_rate;
32 int classh_headroom;
33 int classh_wk_fet_disable;
34 int classh_wk_fet_delay;
35 int classh_wk_fet_thld;
36 int classh_vpch_auto;
37 int classh_vpch_rate;
38 int classh_vpch_man;
39};
40
41struct monitor_cfg {
42 /*
43 * Signal Monitor Data
44 * highly configurable signal monitoring
45 * data positioning and different types of
46 * monitoring data.
47 *
48 * Section 4.8.2 - 4.8.4 of the datasheet
49 */
50 bool is_present;
51 bool imon_specs;
52 bool vmon_specs;
53 bool vpmon_specs;
54 bool vbstmon_specs;
55 bool vpbrstat_specs;
56 bool zerofill_specs;
57 u8 imon_dpth;
58 u8 imon_loc;
59 u8 imon_frm;
60 u8 imon_scale;
61 u8 vmon_dpth;
62 u8 vmon_loc;
63 u8 vmon_frm;
64 u8 vpmon_dpth;
65 u8 vpmon_loc;
66 u8 vpmon_frm;
67 u8 vbstmon_dpth;
68 u8 vbstmon_loc;
69 u8 vbstmon_frm;
70 u8 vpbrstat_dpth;
71 u8 vpbrstat_loc;
72 u8 vpbrstat_frm;
73 u8 zerofill_dpth;
74 u8 zerofill_loc;
75 u8 zerofill_frm;
76};
77
78struct cs35l35_platform_data {
79
80 /* Stereo (2 Device) */
81 bool stereo;
82 /* serial port drive strength */
83 int sp_drv_str;
84 /* serial port drive in unused slots */
85 int sp_drv_unused;
86 /* Boost Power Down with FET */
87 bool bst_pdn_fet_on;
88 /* Boost Voltage : used if ClassH Algo Enabled */
89 int bst_vctl;
90 /* Boost Converter Peak Current CTRL */
91 int bst_ipk;
92 /* Amp Gain Zero Cross */
93 bool gain_zc;
94 /* Audio Input Location */
95 int aud_channel;
96 /* Advisory Input Location */
97 int adv_channel;
98 /* Shared Boost for stereo */
99 bool shared_bst;
100 /* Specifies this amp is using an external boost supply */
101 bool ext_bst;
102 /* ClassH Algorithm */
103 struct classh_cfg classh_algo;
104 /* Monitor Config */
105 struct monitor_cfg mon_cfg;
106};
107
108#endif /* __CS35L35_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index cdfb55f7aede..5170fd81e1fd 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -434,6 +434,8 @@ int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
434 int source, unsigned int freq, int dir); 434 int source, unsigned int freq, int dir);
435int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source, 435int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
436 unsigned int freq_in, unsigned int freq_out); 436 unsigned int freq_in, unsigned int freq_out);
437int snd_soc_codec_set_jack(struct snd_soc_codec *codec,
438 struct snd_soc_jack *jack, void *data);
437 439
438int snd_soc_register_card(struct snd_soc_card *card); 440int snd_soc_register_card(struct snd_soc_card *card);
439int snd_soc_unregister_card(struct snd_soc_card *card); 441int snd_soc_unregister_card(struct snd_soc_card *card);
@@ -497,7 +499,15 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
497int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, 499int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
498 unsigned int dai_fmt); 500 unsigned int dai_fmt);
499 501
502#ifdef CONFIG_DMI
500int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour); 503int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour);
504#else
505static inline int snd_soc_set_dmi_name(struct snd_soc_card *card,
506 const char *flavour)
507{
508 return 0;
509}
510#endif
501 511
502/* Utility functions to get clock rates from various things */ 512/* Utility functions to get clock rates from various things */
503int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); 513int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -721,6 +731,7 @@ struct snd_soc_jack_gpio {
721 /* private: */ 731 /* private: */
722 struct snd_soc_jack *jack; 732 struct snd_soc_jack *jack;
723 struct delayed_work work; 733 struct delayed_work work;
734 struct notifier_block pm_notifier;
724 struct gpio_desc *desc; 735 struct gpio_desc *desc;
725 736
726 void *data; 737 void *data;
@@ -812,7 +823,6 @@ struct snd_soc_component {
812 823
813 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */ 824 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
814 unsigned int registered_as_component:1; 825 unsigned int registered_as_component:1;
815 unsigned int auxiliary:1; /* for auxiliary component of the card */
816 unsigned int suspended:1; /* is in suspend PM state */ 826 unsigned int suspended:1; /* is in suspend PM state */
817 827
818 struct list_head list; 828 struct list_head list;
@@ -913,6 +923,8 @@ struct snd_soc_codec_driver {
913 int clk_id, int source, unsigned int freq, int dir); 923 int clk_id, int source, unsigned int freq, int dir);
914 int (*set_pll)(struct snd_soc_codec *codec, int pll_id, int source, 924 int (*set_pll)(struct snd_soc_codec *codec, int pll_id, int source,
915 unsigned int freq_in, unsigned int freq_out); 925 unsigned int freq_in, unsigned int freq_out);
926 int (*set_jack)(struct snd_soc_codec *codec,
927 struct snd_soc_jack *jack, void *data);
916 928
917 /* codec IO */ 929 /* codec IO */
918 struct regmap *(*get_regmap)(struct device *); 930 struct regmap *(*get_regmap)(struct device *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4b784b6e21c0..ccfad0e9c2cd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -117,6 +117,7 @@ enum transport_state_table {
117 TRANSPORT_ISTATE_PROCESSING = 11, 117 TRANSPORT_ISTATE_PROCESSING = 11,
118 TRANSPORT_COMPLETE_QF_WP = 18, 118 TRANSPORT_COMPLETE_QF_WP = 18,
119 TRANSPORT_COMPLETE_QF_OK = 19, 119 TRANSPORT_COMPLETE_QF_OK = 19,
120 TRANSPORT_COMPLETE_QF_ERR = 20,
120}; 121};
121 122
122/* Used for struct se_cmd->se_cmd_flags */ 123/* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
279 u16 tg_pt_gp_id; 280 u16 tg_pt_gp_id;
280 int tg_pt_gp_valid_id; 281 int tg_pt_gp_valid_id;
281 int tg_pt_gp_alua_supported_states; 282 int tg_pt_gp_alua_supported_states;
282 int tg_pt_gp_alua_pending_state;
283 int tg_pt_gp_alua_previous_state;
284 int tg_pt_gp_alua_access_status; 283 int tg_pt_gp_alua_access_status;
285 int tg_pt_gp_alua_access_type; 284 int tg_pt_gp_alua_access_type;
286 int tg_pt_gp_nonop_delay_msecs; 285 int tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
289 int tg_pt_gp_pref; 288 int tg_pt_gp_pref;
290 int tg_pt_gp_write_metadata; 289 int tg_pt_gp_write_metadata;
291 u32 tg_pt_gp_members; 290 u32 tg_pt_gp_members;
292 atomic_t tg_pt_gp_alua_access_state; 291 int tg_pt_gp_alua_access_state;
293 atomic_t tg_pt_gp_ref_cnt; 292 atomic_t tg_pt_gp_ref_cnt;
294 spinlock_t tg_pt_gp_lock; 293 spinlock_t tg_pt_gp_lock;
295 struct mutex tg_pt_gp_md_mutex; 294 struct mutex tg_pt_gp_transition_mutex;
296 struct se_device *tg_pt_gp_dev; 295 struct se_device *tg_pt_gp_dev;
297 struct config_group tg_pt_gp_group; 296 struct config_group tg_pt_gp_group;
298 struct list_head tg_pt_gp_list; 297 struct list_head tg_pt_gp_list;
299 struct list_head tg_pt_gp_lun_list; 298 struct list_head tg_pt_gp_lun_list;
300 struct se_lun *tg_pt_gp_alua_lun; 299 struct se_lun *tg_pt_gp_alua_lun;
301 struct se_node_acl *tg_pt_gp_alua_nacl; 300 struct se_node_acl *tg_pt_gp_alua_nacl;
302 struct work_struct tg_pt_gp_transition_work;
303 struct completion *tg_pt_gp_transition_complete;
304}; 301};
305 302
306struct t10_vpd { 303struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
705 u64 unpacked_lun; 702 u64 unpacked_lun;
706#define SE_LUN_LINK_MAGIC 0xffff7771 703#define SE_LUN_LINK_MAGIC 0xffff7771
707 u32 lun_link_magic; 704 u32 lun_link_magic;
705 bool lun_shutdown;
708 bool lun_access_ro; 706 bool lun_access_ro;
709 u32 lun_index; 707 u32 lun_index;
710 708
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9b1462e38b82..a076cf1a3a23 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) 730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
731#define __NR_pkey_free 290 731#define __NR_pkey_free 290
732__SYSCALL(__NR_pkey_free, sys_pkey_free) 732__SYSCALL(__NR_pkey_free, sys_pkey_free)
733#define __NR_statx 291
734__SYSCALL(__NR_statx, sys_statx)
733 735
734#undef __NR_syscalls 736#undef __NR_syscalls
735#define __NR_syscalls 291 737#define __NR_syscalls 292
736 738
737/* 739/*
738 * All syscalls below here should go away really, 740 * All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index dd9820b1c779..f8d9fed17ba9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -445,6 +445,7 @@ header-y += unistd.h
445header-y += unix_diag.h 445header-y += unix_diag.h
446header-y += usbdevice_fs.h 446header-y += usbdevice_fs.h
447header-y += usbip.h 447header-y += usbip.h
448header-y += userio.h
448header-y += utime.h 449header-y += utime.h
449header-y += utsname.h 450header-y += utsname.h
450header-y += uuid.h 451header-y += uuid.h
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index db4c253f8011..dcfc3a5a9cb1 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -713,33 +713,6 @@ enum btrfs_err_code {
713 BTRFS_ERROR_DEV_ONLY_WRITABLE, 713 BTRFS_ERROR_DEV_ONLY_WRITABLE,
714 BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS 714 BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
715}; 715};
716/* An error code to error string mapping for the kernel
717* error codes
718*/
719static inline char *btrfs_err_str(enum btrfs_err_code err_code)
720{
721 switch (err_code) {
722 case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
723 return "unable to go below two devices on raid1";
724 case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
725 return "unable to go below four devices on raid10";
726 case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
727 return "unable to go below two devices on raid5";
728 case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
729 return "unable to go below three devices on raid6";
730 case BTRFS_ERROR_DEV_TGT_REPLACE:
731 return "unable to remove the dev_replace target dev";
732 case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
733 return "no missing devices found to remove";
734 case BTRFS_ERROR_DEV_ONLY_WRITABLE:
735 return "unable to remove the only writeable device";
736 case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
737 return "add/delete/balance/replace/resize operation "\
738 "in progress";
739 default:
740 return NULL;
741 }
742}
743 716
744#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ 717#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
745 struct btrfs_ioctl_vol_args) 718 struct btrfs_ioctl_vol_args)
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 85bbb1799df3..d496c02e14bc 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -35,7 +35,7 @@
35#define RTF_PREF(pref) ((pref) << 27) 35#define RTF_PREF(pref) ((pref) << 27)
36#define RTF_PREF_MASK 0x18000000 36#define RTF_PREF_MASK 0x18000000
37 37
38#define RTF_PCPU 0x40000000 38#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
39#define RTF_LOCAL 0x80000000 39#define RTF_LOCAL 0x80000000
40 40
41 41
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 51a6b86e3700..d538897b8e08 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -114,7 +114,7 @@ struct statx {
114 __u64 stx_ino; /* Inode number */ 114 __u64 stx_ino; /* Inode number */
115 __u64 stx_size; /* File size */ 115 __u64 stx_size; /* File size */
116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */ 116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */
117 __u64 __spare1[1]; 117 __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
118 /* 0x40 */ 118 /* 0x40 */
119 struct statx_timestamp stx_atime; /* Last access time */ 119 struct statx_timestamp stx_atime; /* Last access time */
120 struct statx_timestamp stx_btime; /* File creation time */ 120 struct statx_timestamp stx_btime; /* File creation time */
@@ -152,9 +152,10 @@ struct statx {
152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ 152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ 153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
154#define STATX_ALL 0x00000fffU /* All currently supported flags */ 154#define STATX_ALL 0x00000fffU /* All currently supported flags */
155#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
155 156
156/* 157/*
157 * Attributes to be found in stx_attributes 158 * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
158 * 159 *
159 * These give information about the features or the state of a file that might 160 * These give information about the features or the state of a file that might
160 * be of use to ordinary userspace programs such as GUIs or ls rather than 161 * be of use to ordinary userspace programs such as GUIs or ls rather than
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 15b4385a2be1..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -79,7 +79,7 @@
79 * configuration space */ 79 * configuration space */
80#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) 80#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
81/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ 81/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
82#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) 82#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
83 83
84/* Virtio ABI version, this must match exactly */ 84/* Virtio ABI version, this must match exactly */
85#define VIRTIO_PCI_ABI_VERSION 0 85#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index da7cd62bace7..0b3d30837a9f 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -34,6 +34,7 @@
34#define MLX5_ABI_USER_H 34#define MLX5_ABI_USER_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/if_ether.h> /* For ETH_ALEN. */
37 38
38enum { 39enum {
39 MLX5_QP_FLAG_SIGNATURE = 1 << 0, 40 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
66}; 67};
67 68
68enum mlx5_lib_caps { 69enum mlx5_lib_caps {
69 MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, 70 MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
70}; 71};
71 72
72struct mlx5_ib_alloc_ucontext_req_v2 { 73struct mlx5_ib_alloc_ucontext_req_v2 {
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index ef8e2a8ad0af..6b083d327e98 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -46,6 +46,7 @@
46#define DECON_FRAMEFIFO_STATUS 0x0524 46#define DECON_FRAMEFIFO_STATUS 0x0524
47#define DECON_CMU 0x1404 47#define DECON_CMU 0x1404
48#define DECON_UPDATE 0x1410 48#define DECON_UPDATE 0x1410
49#define DECON_CRFMID 0x1414
49#define DECON_UPDATE_SCHEME 0x1438 50#define DECON_UPDATE_SCHEME 0x1438
50#define DECON_VIDCON1 0x2000 51#define DECON_VIDCON1 0x2000
51#define DECON_VIDCON2 0x2004 52#define DECON_VIDCON2 0x2004
@@ -126,6 +127,10 @@
126 127
127/* VIDINTCON0 */ 128/* VIDINTCON0 */
128#define VIDINTCON0_FRAMEDONE (1 << 17) 129#define VIDINTCON0_FRAMEDONE (1 << 17)
130#define VIDINTCON0_FRAMESEL_BP (0 << 15)
131#define VIDINTCON0_FRAMESEL_VS (1 << 15)
132#define VIDINTCON0_FRAMESEL_AC (2 << 15)
133#define VIDINTCON0_FRAMESEL_FP (3 << 15)
129#define VIDINTCON0_INTFRMEN (1 << 12) 134#define VIDINTCON0_INTFRMEN (1 << 12)
130#define VIDINTCON0_INTEN (1 << 0) 135#define VIDINTCON0_INTEN (1 << 0)
131 136
@@ -142,6 +147,13 @@
142#define STANDALONE_UPDATE_F (1 << 0) 147#define STANDALONE_UPDATE_F (1 << 0)
143 148
144/* DECON_VIDCON1 */ 149/* DECON_VIDCON1 */
150#define VIDCON1_LINECNT_MASK (0x0fff << 16)
151#define VIDCON1_I80_ACTIVE (1 << 15)
152#define VIDCON1_VSTATUS_MASK (0x3 << 13)
153#define VIDCON1_VSTATUS_VS (0 << 13)
154#define VIDCON1_VSTATUS_BP (1 << 13)
155#define VIDCON1_VSTATUS_AC (2 << 13)
156#define VIDCON1_VSTATUS_FP (3 << 13)
145#define VIDCON1_VCLK_MASK (0x3 << 9) 157#define VIDCON1_VCLK_MASK (0x3 << 9)
146#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) 158#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
147#define VIDCON1_VCLK_HOLD (0x0 << 9) 159#define VIDCON1_VCLK_HOLD (0x0 << 9)
diff --git a/init/main.c b/init/main.c
index f9c9d9948203..b0c11cbf5ddf 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1022,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
1022 1022
1023 workqueue_init(); 1023 workqueue_init();
1024 1024
1025 init_mm_internals();
1026
1025 do_pre_smp_initcalls(); 1027 do_pre_smp_initcalls();
1026 lockup_detector_init(); 1028 lockup_detector_init();
1027 1029
diff --git a/kernel/audit.c b/kernel/audit.c
index e794544f5e63..a871bf80fde1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -54,6 +54,10 @@
54#include <linux/kthread.h> 54#include <linux/kthread.h>
55#include <linux/kernel.h> 55#include <linux/kernel.h>
56#include <linux/syscalls.h> 56#include <linux/syscalls.h>
57#include <linux/spinlock.h>
58#include <linux/rcupdate.h>
59#include <linux/mutex.h>
60#include <linux/gfp.h>
57 61
58#include <linux/audit.h> 62#include <linux/audit.h>
59 63
@@ -90,13 +94,34 @@ static u32 audit_default;
90/* If auditing cannot proceed, audit_failure selects what happens. */ 94/* If auditing cannot proceed, audit_failure selects what happens. */
91static u32 audit_failure = AUDIT_FAIL_PRINTK; 95static u32 audit_failure = AUDIT_FAIL_PRINTK;
92 96
93/* 97/* private audit network namespace index */
94 * If audit records are to be written to the netlink socket, audit_pid 98static unsigned int audit_net_id;
95 * contains the pid of the auditd process and audit_nlk_portid contains 99
96 * the portid to use to send netlink messages to that process. 100/**
101 * struct audit_net - audit private network namespace data
102 * @sk: communication socket
97 */ 103 */
98int audit_pid; 104struct audit_net {
99static __u32 audit_nlk_portid; 105 struct sock *sk;
106};
107
108/**
109 * struct auditd_connection - kernel/auditd connection state
110 * @pid: auditd PID
111 * @portid: netlink portid
112 * @net: the associated network namespace
113 * @lock: spinlock to protect write access
114 *
115 * Description:
116 * This struct is RCU protected; you must either hold the RCU lock for reading
117 * or the included spinlock for writing.
118 */
119static struct auditd_connection {
120 int pid;
121 u32 portid;
122 struct net *net;
123 spinlock_t lock;
124} auditd_conn;
100 125
101/* If audit_rate_limit is non-zero, limit the rate of sending audit records 126/* If audit_rate_limit is non-zero, limit the rate of sending audit records
102 * to that number per second. This prevents DoS attacks, but results in 127 * to that number per second. This prevents DoS attacks, but results in
@@ -123,10 +148,6 @@ u32 audit_sig_sid = 0;
123*/ 148*/
124static atomic_t audit_lost = ATOMIC_INIT(0); 149static atomic_t audit_lost = ATOMIC_INIT(0);
125 150
126/* The netlink socket. */
127static struct sock *audit_sock;
128static unsigned int audit_net_id;
129
130/* Hash for inode-based rules */ 151/* Hash for inode-based rules */
131struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 152struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
132 153
@@ -192,6 +213,43 @@ struct audit_reply {
192 struct sk_buff *skb; 213 struct sk_buff *skb;
193}; 214};
194 215
216/**
217 * auditd_test_task - Check to see if a given task is an audit daemon
218 * @task: the task to check
219 *
220 * Description:
221 * Return 1 if the task is a registered audit daemon, 0 otherwise.
222 */
223int auditd_test_task(const struct task_struct *task)
224{
225 int rc;
226
227 rcu_read_lock();
228 rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
229 rcu_read_unlock();
230
231 return rc;
232}
233
234/**
235 * audit_get_sk - Return the audit socket for the given network namespace
236 * @net: the destination network namespace
237 *
238 * Description:
239 * Returns the sock pointer if valid, NULL otherwise. The caller must ensure
240 * that a reference is held for the network namespace while the sock is in use.
241 */
242static struct sock *audit_get_sk(const struct net *net)
243{
244 struct audit_net *aunet;
245
246 if (!net)
247 return NULL;
248
249 aunet = net_generic(net, audit_net_id);
250 return aunet->sk;
251}
252
195static void audit_set_portid(struct audit_buffer *ab, __u32 portid) 253static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
196{ 254{
197 if (ab) { 255 if (ab) {
@@ -210,9 +268,7 @@ void audit_panic(const char *message)
210 pr_err("%s\n", message); 268 pr_err("%s\n", message);
211 break; 269 break;
212 case AUDIT_FAIL_PANIC: 270 case AUDIT_FAIL_PANIC:
213 /* test audit_pid since printk is always losey, why bother? */ 271 panic("audit: %s\n", message);
214 if (audit_pid)
215 panic("audit: %s\n", message);
216 break; 272 break;
217 } 273 }
218} 274}
@@ -370,21 +426,60 @@ static int audit_set_failure(u32 state)
370 return audit_do_config_change("audit_failure", &audit_failure, state); 426 return audit_do_config_change("audit_failure", &audit_failure, state);
371} 427}
372 428
373/* 429/**
374 * For one reason or another this nlh isn't getting delivered to the userspace 430 * auditd_set - Set/Reset the auditd connection state
375 * audit daemon, just send it to printk. 431 * @pid: auditd PID
432 * @portid: auditd netlink portid
433 * @net: auditd network namespace pointer
434 *
435 * Description:
436 * This function will obtain and drop network namespace references as
437 * necessary.
438 */
439static void auditd_set(int pid, u32 portid, struct net *net)
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&auditd_conn.lock, flags);
444 auditd_conn.pid = pid;
445 auditd_conn.portid = portid;
446 if (auditd_conn.net)
447 put_net(auditd_conn.net);
448 if (net)
449 auditd_conn.net = get_net(net);
450 else
451 auditd_conn.net = NULL;
452 spin_unlock_irqrestore(&auditd_conn.lock, flags);
453}
454
455/**
456 * kauditd_print_skb - Print the audit record to the ring buffer
457 * @skb: audit record
458 *
459 * Whatever the reason, this packet may not make it to the auditd connection
460 * so write it via printk so the information isn't completely lost.
376 */ 461 */
377static void kauditd_printk_skb(struct sk_buff *skb) 462static void kauditd_printk_skb(struct sk_buff *skb)
378{ 463{
379 struct nlmsghdr *nlh = nlmsg_hdr(skb); 464 struct nlmsghdr *nlh = nlmsg_hdr(skb);
380 char *data = nlmsg_data(nlh); 465 char *data = nlmsg_data(nlh);
381 466
382 if (nlh->nlmsg_type != AUDIT_EOE) { 467 if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
383 if (printk_ratelimit()) 468 pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
384 pr_notice("type=%d %s\n", nlh->nlmsg_type, data); 469}
385 else 470
386 audit_log_lost("printk limit exceeded"); 471/**
387 } 472 * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
473 * @skb: audit record
474 *
475 * Description:
476 * This should only be used by the kauditd_thread when it fails to flush the
477 * hold queue.
478 */
479static void kauditd_rehold_skb(struct sk_buff *skb)
480{
481 /* put the record back in the queue at the same place */
482 skb_queue_head(&audit_hold_queue, skb);
388} 483}
389 484
390/** 485/**
@@ -444,65 +539,163 @@ static void kauditd_retry_skb(struct sk_buff *skb)
444 * auditd_reset - Disconnect the auditd connection 539 * auditd_reset - Disconnect the auditd connection
445 * 540 *
446 * Description: 541 * Description:
447 * Break the auditd/kauditd connection and move all the records in the retry 542 * Break the auditd/kauditd connection and move all the queued records into the
448 * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex 543 * hold queue in case auditd reconnects.
449 * must be held when calling this function.
450 */ 544 */
451static void auditd_reset(void) 545static void auditd_reset(void)
452{ 546{
453 struct sk_buff *skb; 547 struct sk_buff *skb;
454 548
455 /* break the connection */ 549 /* if it isn't already broken, break the connection */
456 if (audit_sock) { 550 rcu_read_lock();
457 sock_put(audit_sock); 551 if (auditd_conn.pid)
458 audit_sock = NULL; 552 auditd_set(0, 0, NULL);
459 } 553 rcu_read_unlock();
460 audit_pid = 0;
461 audit_nlk_portid = 0;
462 554
463 /* flush all of the retry queue to the hold queue */ 555 /* flush all of the main and retry queues to the hold queue */
464 while ((skb = skb_dequeue(&audit_retry_queue))) 556 while ((skb = skb_dequeue(&audit_retry_queue)))
465 kauditd_hold_skb(skb); 557 kauditd_hold_skb(skb);
558 while ((skb = skb_dequeue(&audit_queue)))
559 kauditd_hold_skb(skb);
466} 560}
467 561
468/** 562/**
469 * kauditd_send_unicast_skb - Send a record via unicast to auditd 563 * auditd_send_unicast_skb - Send a record via unicast to auditd
470 * @skb: audit record 564 * @skb: audit record
565 *
566 * Description:
567 * Send a skb to the audit daemon, returns positive/zero values on success and
568 * negative values on failure; in all cases the skb will be consumed by this
569 * function. If the send results in -ECONNREFUSED the connection with auditd
570 * will be reset. This function may sleep so callers should not hold any locks
571 * where this would cause a problem.
471 */ 572 */
472static int kauditd_send_unicast_skb(struct sk_buff *skb) 573static int auditd_send_unicast_skb(struct sk_buff *skb)
473{ 574{
474 int rc; 575 int rc;
576 u32 portid;
577 struct net *net;
578 struct sock *sk;
579
580 /* NOTE: we can't call netlink_unicast while in the RCU section so
581 * take a reference to the network namespace and grab local
582 * copies of the namespace, the sock, and the portid; the
583 * namespace and sock aren't going to go away while we hold a
584 * reference and if the portid does become invalid after the RCU
585 * section netlink_unicast() should safely return an error */
586
587 rcu_read_lock();
588 if (!auditd_conn.pid) {
589 rcu_read_unlock();
590 rc = -ECONNREFUSED;
591 goto err;
592 }
593 net = auditd_conn.net;
594 get_net(net);
595 sk = audit_get_sk(net);
596 portid = auditd_conn.portid;
597 rcu_read_unlock();
475 598
476 /* if we know nothing is connected, don't even try the netlink call */ 599 rc = netlink_unicast(sk, skb, portid, 0);
477 if (!audit_pid) 600 put_net(net);
478 return -ECONNREFUSED; 601 if (rc < 0)
602 goto err;
479 603
480 /* get an extra skb reference in case we fail to send */ 604 return rc;
481 skb_get(skb);
482 rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
483 if (rc >= 0) {
484 consume_skb(skb);
485 rc = 0;
486 }
487 605
606err:
607 if (rc == -ECONNREFUSED)
608 auditd_reset();
488 return rc; 609 return rc;
489} 610}
490 611
612/**
613 * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
614 * @sk: the sending sock
615 * @portid: the netlink destination
616 * @queue: the skb queue to process
617 * @retry_limit: limit on number of netlink unicast failures
618 * @skb_hook: per-skb hook for additional processing
619 * @err_hook: hook called if the skb fails the netlink unicast send
620 *
621 * Description:
622 * Run through the given queue and attempt to send the audit records to auditd,
623 * returns zero on success, negative values on failure. It is up to the caller
624 * to ensure that the @sk is valid for the duration of this function.
625 *
626 */
627static int kauditd_send_queue(struct sock *sk, u32 portid,
628 struct sk_buff_head *queue,
629 unsigned int retry_limit,
630 void (*skb_hook)(struct sk_buff *skb),
631 void (*err_hook)(struct sk_buff *skb))
632{
633 int rc = 0;
634 struct sk_buff *skb;
635 static unsigned int failed = 0;
636
637 /* NOTE: kauditd_thread takes care of all our locking, we just use
638 * the netlink info passed to us (e.g. sk and portid) */
639
640 while ((skb = skb_dequeue(queue))) {
641 /* call the skb_hook for each skb we touch */
642 if (skb_hook)
643 (*skb_hook)(skb);
644
645 /* can we send to anyone via unicast? */
646 if (!sk) {
647 if (err_hook)
648 (*err_hook)(skb);
649 continue;
650 }
651
652 /* grab an extra skb reference in case of error */
653 skb_get(skb);
654 rc = netlink_unicast(sk, skb, portid, 0);
655 if (rc < 0) {
656 /* fatal failure for our queue flush attempt? */
657 if (++failed >= retry_limit ||
658 rc == -ECONNREFUSED || rc == -EPERM) {
659 /* yes - error processing for the queue */
660 sk = NULL;
661 if (err_hook)
662 (*err_hook)(skb);
663 if (!skb_hook)
664 goto out;
665 /* keep processing with the skb_hook */
666 continue;
667 } else
668 /* no - requeue to preserve ordering */
669 skb_queue_head(queue, skb);
670 } else {
671 /* it worked - drop the extra reference and continue */
672 consume_skb(skb);
673 failed = 0;
674 }
675 }
676
677out:
678 return (rc >= 0 ? 0 : rc);
679}
680
491/* 681/*
492 * kauditd_send_multicast_skb - Send a record to any multicast listeners 682 * kauditd_send_multicast_skb - Send a record to any multicast listeners
493 * @skb: audit record 683 * @skb: audit record
494 * 684 *
495 * Description: 685 * Description:
496 * This function doesn't consume an skb as might be expected since it has to 686 * Write a multicast message to anyone listening in the initial network
497 * copy it anyways. 687 * namespace. This function doesn't consume an skb as might be expected since
688 * it has to copy it anyways.
498 */ 689 */
499static void kauditd_send_multicast_skb(struct sk_buff *skb) 690static void kauditd_send_multicast_skb(struct sk_buff *skb)
500{ 691{
501 struct sk_buff *copy; 692 struct sk_buff *copy;
502 struct audit_net *aunet = net_generic(&init_net, audit_net_id); 693 struct sock *sock = audit_get_sk(&init_net);
503 struct sock *sock = aunet->nlsk;
504 struct nlmsghdr *nlh; 694 struct nlmsghdr *nlh;
505 695
696 /* NOTE: we are not taking an additional reference for init_net since
697 * we don't have to worry about it going away */
698
506 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) 699 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
507 return; 700 return;
508 701
@@ -526,149 +719,79 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
526} 719}
527 720
528/** 721/**
529 * kauditd_wake_condition - Return true when it is time to wake kauditd_thread 722 * kauditd_thread - Worker thread to send audit records to userspace
530 * 723 * @dummy: unused
531 * Description:
532 * This function is for use by the wait_event_freezable() call in
533 * kauditd_thread().
534 */ 724 */
535static int kauditd_wake_condition(void)
536{
537 static int pid_last = 0;
538 int rc;
539 int pid = audit_pid;
540
541 /* wake on new messages or a change in the connected auditd */
542 rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
543 if (rc)
544 pid_last = pid;
545
546 return rc;
547}
548
549static int kauditd_thread(void *dummy) 725static int kauditd_thread(void *dummy)
550{ 726{
551 int rc; 727 int rc;
552 int auditd = 0; 728 u32 portid = 0;
553 int reschedule = 0; 729 struct net *net = NULL;
554 struct sk_buff *skb; 730 struct sock *sk = NULL;
555 struct nlmsghdr *nlh;
556 731
557#define UNICAST_RETRIES 5 732#define UNICAST_RETRIES 5
558#define AUDITD_BAD(x,y) \
559 ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
560
561 /* NOTE: we do invalidate the auditd connection flag on any sending
562 * errors, but we only "restore" the connection flag at specific places
563 * in the loop in order to help ensure proper ordering of audit
564 * records */
565 733
566 set_freezable(); 734 set_freezable();
567 while (!kthread_should_stop()) { 735 while (!kthread_should_stop()) {
568 /* NOTE: possible area for future improvement is to look at 736 /* NOTE: see the lock comments in auditd_send_unicast_skb() */
569 * the hold and retry queues, since only this thread 737 rcu_read_lock();
570 * has access to these queues we might be able to do 738 if (!auditd_conn.pid) {
571 * our own queuing and skip some/all of the locking */ 739 rcu_read_unlock();
572 740 goto main_queue;
573 /* NOTE: it might be a fun experiment to split the hold and 741 }
574 * retry queue handling to another thread, but the 742 net = auditd_conn.net;
575 * synchronization issues and other overhead might kill 743 get_net(net);
576 * any performance gains */ 744 sk = audit_get_sk(net);
745 portid = auditd_conn.portid;
746 rcu_read_unlock();
577 747
578 /* attempt to flush the hold queue */ 748 /* attempt to flush the hold queue */
579 while (auditd && (skb = skb_dequeue(&audit_hold_queue))) { 749 rc = kauditd_send_queue(sk, portid,
580 rc = kauditd_send_unicast_skb(skb); 750 &audit_hold_queue, UNICAST_RETRIES,
581 if (rc) { 751 NULL, kauditd_rehold_skb);
582 /* requeue to the same spot */ 752 if (rc < 0) {
583 skb_queue_head(&audit_hold_queue, skb); 753 sk = NULL;
584 754 auditd_reset();
585 auditd = 0; 755 goto main_queue;
586 if (AUDITD_BAD(rc, reschedule)) {
587 mutex_lock(&audit_cmd_mutex);
588 auditd_reset();
589 mutex_unlock(&audit_cmd_mutex);
590 reschedule = 0;
591 }
592 } else
593 /* we were able to send successfully */
594 reschedule = 0;
595 } 756 }
596 757
597 /* attempt to flush the retry queue */ 758 /* attempt to flush the retry queue */
598 while (auditd && (skb = skb_dequeue(&audit_retry_queue))) { 759 rc = kauditd_send_queue(sk, portid,
599 rc = kauditd_send_unicast_skb(skb); 760 &audit_retry_queue, UNICAST_RETRIES,
600 if (rc) { 761 NULL, kauditd_hold_skb);
601 auditd = 0; 762 if (rc < 0) {
602 if (AUDITD_BAD(rc, reschedule)) { 763 sk = NULL;
603 kauditd_hold_skb(skb); 764 auditd_reset();
604 mutex_lock(&audit_cmd_mutex); 765 goto main_queue;
605 auditd_reset();
606 mutex_unlock(&audit_cmd_mutex);
607 reschedule = 0;
608 } else
609 /* temporary problem (we hope), queue
610 * to the same spot and retry */
611 skb_queue_head(&audit_retry_queue, skb);
612 } else
613 /* we were able to send successfully */
614 reschedule = 0;
615 } 766 }
616 767
617 /* standard queue processing, try to be as quick as possible */ 768main_queue:
618quick_loop: 769 /* process the main queue - do the multicast send and attempt
619 skb = skb_dequeue(&audit_queue); 770 * unicast, dump failed record sends to the retry queue; if
620 if (skb) { 771 * sk == NULL due to previous failures we will just do the
621 /* setup the netlink header, see the comments in 772 * multicast send and move the record to the retry queue */
622 * kauditd_send_multicast_skb() for length quirks */ 773 rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
623 nlh = nlmsg_hdr(skb); 774 kauditd_send_multicast_skb,
624 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; 775 kauditd_retry_skb);
625 776 if (sk == NULL || rc < 0)
626 /* attempt to send to any multicast listeners */ 777 auditd_reset();
627 kauditd_send_multicast_skb(skb); 778 sk = NULL;
628 779
629 /* attempt to send to auditd, queue on failure */ 780 /* drop our netns reference, no auditd sends past this line */
630 if (auditd) { 781 if (net) {
631 rc = kauditd_send_unicast_skb(skb); 782 put_net(net);
632 if (rc) { 783 net = NULL;
633 auditd = 0;
634 if (AUDITD_BAD(rc, reschedule)) {
635 mutex_lock(&audit_cmd_mutex);
636 auditd_reset();
637 mutex_unlock(&audit_cmd_mutex);
638 reschedule = 0;
639 }
640
641 /* move to the retry queue */
642 kauditd_retry_skb(skb);
643 } else
644 /* everything is working so go fast! */
645 goto quick_loop;
646 } else if (reschedule)
647 /* we are currently having problems, move to
648 * the retry queue */
649 kauditd_retry_skb(skb);
650 else
651 /* dump the message via printk and hold it */
652 kauditd_hold_skb(skb);
653 } else {
654 /* we have flushed the backlog so wake everyone */
655 wake_up(&audit_backlog_wait);
656
657 /* if everything is okay with auditd (if present), go
658 * to sleep until there is something new in the queue
659 * or we have a change in the connected auditd;
660 * otherwise simply reschedule to give things a chance
661 * to recover */
662 if (reschedule) {
663 set_current_state(TASK_INTERRUPTIBLE);
664 schedule();
665 } else
666 wait_event_freezable(kauditd_wait,
667 kauditd_wake_condition());
668
669 /* update the auditd connection status */
670 auditd = (audit_pid ? 1 : 0);
671 } 784 }
785
786 /* we have processed all the queues so wake everyone */
787 wake_up(&audit_backlog_wait);
788
789 /* NOTE: we want to wake up if there is anything on the queue,
790 * regardless of if an auditd is connected, as we need to
791 * do the multicast send and rotate records from the
792 * main queue to the retry/hold queues */
793 wait_event_freezable(kauditd_wait,
794 (skb_queue_len(&audit_queue) ? 1 : 0));
672 } 795 }
673 796
674 return 0; 797 return 0;
@@ -678,17 +801,16 @@ int audit_send_list(void *_dest)
678{ 801{
679 struct audit_netlink_list *dest = _dest; 802 struct audit_netlink_list *dest = _dest;
680 struct sk_buff *skb; 803 struct sk_buff *skb;
681 struct net *net = dest->net; 804 struct sock *sk = audit_get_sk(dest->net);
682 struct audit_net *aunet = net_generic(net, audit_net_id);
683 805
684 /* wait for parent to finish and send an ACK */ 806 /* wait for parent to finish and send an ACK */
685 mutex_lock(&audit_cmd_mutex); 807 mutex_lock(&audit_cmd_mutex);
686 mutex_unlock(&audit_cmd_mutex); 808 mutex_unlock(&audit_cmd_mutex);
687 809
688 while ((skb = __skb_dequeue(&dest->q)) != NULL) 810 while ((skb = __skb_dequeue(&dest->q)) != NULL)
689 netlink_unicast(aunet->nlsk, skb, dest->portid, 0); 811 netlink_unicast(sk, skb, dest->portid, 0);
690 812
691 put_net(net); 813 put_net(dest->net);
692 kfree(dest); 814 kfree(dest);
693 815
694 return 0; 816 return 0;
@@ -722,16 +844,15 @@ out_kfree_skb:
722static int audit_send_reply_thread(void *arg) 844static int audit_send_reply_thread(void *arg)
723{ 845{
724 struct audit_reply *reply = (struct audit_reply *)arg; 846 struct audit_reply *reply = (struct audit_reply *)arg;
725 struct net *net = reply->net; 847 struct sock *sk = audit_get_sk(reply->net);
726 struct audit_net *aunet = net_generic(net, audit_net_id);
727 848
728 mutex_lock(&audit_cmd_mutex); 849 mutex_lock(&audit_cmd_mutex);
729 mutex_unlock(&audit_cmd_mutex); 850 mutex_unlock(&audit_cmd_mutex);
730 851
731 /* Ignore failure. It'll only happen if the sender goes away, 852 /* Ignore failure. It'll only happen if the sender goes away,
732 because our timeout is set to infinite. */ 853 because our timeout is set to infinite. */
733 netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); 854 netlink_unicast(sk, reply->skb, reply->portid, 0);
734 put_net(net); 855 put_net(reply->net);
735 kfree(reply); 856 kfree(reply);
736 return 0; 857 return 0;
737} 858}
@@ -949,12 +1070,12 @@ static int audit_set_feature(struct sk_buff *skb)
949 1070
950static int audit_replace(pid_t pid) 1071static int audit_replace(pid_t pid)
951{ 1072{
952 struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, 1073 struct sk_buff *skb;
953 &pid, sizeof(pid));
954 1074
1075 skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
955 if (!skb) 1076 if (!skb)
956 return -ENOMEM; 1077 return -ENOMEM;
957 return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); 1078 return auditd_send_unicast_skb(skb);
958} 1079}
959 1080
960static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1081static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -981,7 +1102,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
981 memset(&s, 0, sizeof(s)); 1102 memset(&s, 0, sizeof(s));
982 s.enabled = audit_enabled; 1103 s.enabled = audit_enabled;
983 s.failure = audit_failure; 1104 s.failure = audit_failure;
984 s.pid = audit_pid; 1105 rcu_read_lock();
1106 s.pid = auditd_conn.pid;
1107 rcu_read_unlock();
985 s.rate_limit = audit_rate_limit; 1108 s.rate_limit = audit_rate_limit;
986 s.backlog_limit = audit_backlog_limit; 1109 s.backlog_limit = audit_backlog_limit;
987 s.lost = atomic_read(&audit_lost); 1110 s.lost = atomic_read(&audit_lost);
@@ -1014,30 +1137,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1014 * from the initial pid namespace, but something 1137 * from the initial pid namespace, but something
1015 * to keep in mind if this changes */ 1138 * to keep in mind if this changes */
1016 int new_pid = s.pid; 1139 int new_pid = s.pid;
1140 pid_t auditd_pid;
1017 pid_t requesting_pid = task_tgid_vnr(current); 1141 pid_t requesting_pid = task_tgid_vnr(current);
1018 1142
1019 if ((!new_pid) && (requesting_pid != audit_pid)) { 1143 /* test the auditd connection */
1020 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1144 audit_replace(requesting_pid);
1145
1146 rcu_read_lock();
1147 auditd_pid = auditd_conn.pid;
1148 /* only the current auditd can unregister itself */
1149 if ((!new_pid) && (requesting_pid != auditd_pid)) {
1150 rcu_read_unlock();
1151 audit_log_config_change("audit_pid", new_pid,
1152 auditd_pid, 0);
1021 return -EACCES; 1153 return -EACCES;
1022 } 1154 }
1023 if (audit_pid && new_pid && 1155 /* replacing a healthy auditd is not allowed */
1024 audit_replace(requesting_pid) != -ECONNREFUSED) { 1156 if (auditd_pid && new_pid) {
1025 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1157 rcu_read_unlock();
1158 audit_log_config_change("audit_pid", new_pid,
1159 auditd_pid, 0);
1026 return -EEXIST; 1160 return -EEXIST;
1027 } 1161 }
1162 rcu_read_unlock();
1163
1028 if (audit_enabled != AUDIT_OFF) 1164 if (audit_enabled != AUDIT_OFF)
1029 audit_log_config_change("audit_pid", new_pid, audit_pid, 1); 1165 audit_log_config_change("audit_pid", new_pid,
1166 auditd_pid, 1);
1167
1030 if (new_pid) { 1168 if (new_pid) {
1031 if (audit_sock) 1169 /* register a new auditd connection */
1032 sock_put(audit_sock); 1170 auditd_set(new_pid,
1033 audit_pid = new_pid; 1171 NETLINK_CB(skb).portid,
1034 audit_nlk_portid = NETLINK_CB(skb).portid; 1172 sock_net(NETLINK_CB(skb).sk));
1035 sock_hold(skb->sk); 1173 /* try to process any backlog */
1036 audit_sock = skb->sk; 1174 wake_up_interruptible(&kauditd_wait);
1037 } else { 1175 } else
1176 /* unregister the auditd connection */
1038 auditd_reset(); 1177 auditd_reset();
1039 }
1040 wake_up_interruptible(&kauditd_wait);
1041 } 1178 }
1042 if (s.mask & AUDIT_STATUS_RATE_LIMIT) { 1179 if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
1043 err = audit_set_rate_limit(s.rate_limit); 1180 err = audit_set_rate_limit(s.rate_limit);
@@ -1090,7 +1227,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1090 if (err) 1227 if (err)
1091 break; 1228 break;
1092 } 1229 }
1093 mutex_unlock(&audit_cmd_mutex);
1094 audit_log_common_recv_msg(&ab, msg_type); 1230 audit_log_common_recv_msg(&ab, msg_type);
1095 if (msg_type != AUDIT_USER_TTY) 1231 if (msg_type != AUDIT_USER_TTY)
1096 audit_log_format(ab, " msg='%.*s'", 1232 audit_log_format(ab, " msg='%.*s'",
@@ -1108,7 +1244,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1108 } 1244 }
1109 audit_set_portid(ab, NETLINK_CB(skb).portid); 1245 audit_set_portid(ab, NETLINK_CB(skb).portid);
1110 audit_log_end(ab); 1246 audit_log_end(ab);
1111 mutex_lock(&audit_cmd_mutex);
1112 } 1247 }
1113 break; 1248 break;
1114 case AUDIT_ADD_RULE: 1249 case AUDIT_ADD_RULE:
@@ -1298,26 +1433,26 @@ static int __net_init audit_net_init(struct net *net)
1298 1433
1299 struct audit_net *aunet = net_generic(net, audit_net_id); 1434 struct audit_net *aunet = net_generic(net, audit_net_id);
1300 1435
1301 aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); 1436 aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
1302 if (aunet->nlsk == NULL) { 1437 if (aunet->sk == NULL) {
1303 audit_panic("cannot initialize netlink socket in namespace"); 1438 audit_panic("cannot initialize netlink socket in namespace");
1304 return -ENOMEM; 1439 return -ENOMEM;
1305 } 1440 }
1306 aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 1441 aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1442
1307 return 0; 1443 return 0;
1308} 1444}
1309 1445
1310static void __net_exit audit_net_exit(struct net *net) 1446static void __net_exit audit_net_exit(struct net *net)
1311{ 1447{
1312 struct audit_net *aunet = net_generic(net, audit_net_id); 1448 struct audit_net *aunet = net_generic(net, audit_net_id);
1313 struct sock *sock = aunet->nlsk; 1449
1314 mutex_lock(&audit_cmd_mutex); 1450 rcu_read_lock();
1315 if (sock == audit_sock) 1451 if (net == auditd_conn.net)
1316 auditd_reset(); 1452 auditd_reset();
1317 mutex_unlock(&audit_cmd_mutex); 1453 rcu_read_unlock();
1318 1454
1319 netlink_kernel_release(sock); 1455 netlink_kernel_release(aunet->sk);
1320 aunet->nlsk = NULL;
1321} 1456}
1322 1457
1323static struct pernet_operations audit_net_ops __net_initdata = { 1458static struct pernet_operations audit_net_ops __net_initdata = {
@@ -1335,20 +1470,24 @@ static int __init audit_init(void)
1335 if (audit_initialized == AUDIT_DISABLED) 1470 if (audit_initialized == AUDIT_DISABLED)
1336 return 0; 1471 return 0;
1337 1472
1338 pr_info("initializing netlink subsys (%s)\n", 1473 memset(&auditd_conn, 0, sizeof(auditd_conn));
1339 audit_default ? "enabled" : "disabled"); 1474 spin_lock_init(&auditd_conn.lock);
1340 register_pernet_subsys(&audit_net_ops);
1341 1475
1342 skb_queue_head_init(&audit_queue); 1476 skb_queue_head_init(&audit_queue);
1343 skb_queue_head_init(&audit_retry_queue); 1477 skb_queue_head_init(&audit_retry_queue);
1344 skb_queue_head_init(&audit_hold_queue); 1478 skb_queue_head_init(&audit_hold_queue);
1345 audit_initialized = AUDIT_INITIALIZED;
1346 audit_enabled = audit_default;
1347 audit_ever_enabled |= !!audit_default;
1348 1479
1349 for (i = 0; i < AUDIT_INODE_BUCKETS; i++) 1480 for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
1350 INIT_LIST_HEAD(&audit_inode_hash[i]); 1481 INIT_LIST_HEAD(&audit_inode_hash[i]);
1351 1482
1483 pr_info("initializing netlink subsys (%s)\n",
1484 audit_default ? "enabled" : "disabled");
1485 register_pernet_subsys(&audit_net_ops);
1486
1487 audit_initialized = AUDIT_INITIALIZED;
1488 audit_enabled = audit_default;
1489 audit_ever_enabled |= !!audit_default;
1490
1352 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); 1491 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
1353 if (IS_ERR(kauditd_task)) { 1492 if (IS_ERR(kauditd_task)) {
1354 int err = PTR_ERR(kauditd_task); 1493 int err = PTR_ERR(kauditd_task);
@@ -1519,20 +1658,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1519 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) 1658 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
1520 return NULL; 1659 return NULL;
1521 1660
1522 /* don't ever fail/sleep on these two conditions: 1661 /* NOTE: don't ever fail/sleep on these two conditions:
1523 * 1. auditd generated record - since we need auditd to drain the 1662 * 1. auditd generated record - since we need auditd to drain the
1524 * queue; also, when we are checking for auditd, compare PIDs using 1663 * queue; also, when we are checking for auditd, compare PIDs using
1525 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() 1664 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
1526 * using a PID anchored in the caller's namespace 1665 * using a PID anchored in the caller's namespace
1527 * 2. audit command message - record types 1000 through 1099 inclusive 1666 * 2. generator holding the audit_cmd_mutex - we don't want to block
1528 * are command messages/records used to manage the kernel subsystem 1667 * while holding the mutex */
1529 * and the audit userspace, blocking on these messages could cause 1668 if (!(auditd_test_task(current) ||
1530 * problems under load so don't do it (note: not all of these 1669 (current == __mutex_owner(&audit_cmd_mutex)))) {
1531 * command types are valid as record types, but it is quicker to 1670 long stime = audit_backlog_wait_time;
1532 * just check two ints than a series of ints in a if/switch stmt) */
1533 if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
1534 (type >= 1000 && type <= 1099))) {
1535 long sleep_time = audit_backlog_wait_time;
1536 1671
1537 while (audit_backlog_limit && 1672 while (audit_backlog_limit &&
1538 (skb_queue_len(&audit_queue) > audit_backlog_limit)) { 1673 (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
@@ -1541,14 +1676,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1541 1676
1542 /* sleep if we are allowed and we haven't exhausted our 1677 /* sleep if we are allowed and we haven't exhausted our
1543 * backlog wait limit */ 1678 * backlog wait limit */
1544 if ((gfp_mask & __GFP_DIRECT_RECLAIM) && 1679 if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
1545 (sleep_time > 0)) {
1546 DECLARE_WAITQUEUE(wait, current); 1680 DECLARE_WAITQUEUE(wait, current);
1547 1681
1548 add_wait_queue_exclusive(&audit_backlog_wait, 1682 add_wait_queue_exclusive(&audit_backlog_wait,
1549 &wait); 1683 &wait);
1550 set_current_state(TASK_UNINTERRUPTIBLE); 1684 set_current_state(TASK_UNINTERRUPTIBLE);
1551 sleep_time = schedule_timeout(sleep_time); 1685 stime = schedule_timeout(stime);
1552 remove_wait_queue(&audit_backlog_wait, &wait); 1686 remove_wait_queue(&audit_backlog_wait, &wait);
1553 } else { 1687 } else {
1554 if (audit_rate_check() && printk_ratelimit()) 1688 if (audit_rate_check() && printk_ratelimit())
@@ -2127,15 +2261,27 @@ out:
2127 */ 2261 */
2128void audit_log_end(struct audit_buffer *ab) 2262void audit_log_end(struct audit_buffer *ab)
2129{ 2263{
2264 struct sk_buff *skb;
2265 struct nlmsghdr *nlh;
2266
2130 if (!ab) 2267 if (!ab)
2131 return; 2268 return;
2132 if (!audit_rate_check()) { 2269
2133 audit_log_lost("rate limit exceeded"); 2270 if (audit_rate_check()) {
2134 } else { 2271 skb = ab->skb;
2135 skb_queue_tail(&audit_queue, ab->skb);
2136 wake_up_interruptible(&kauditd_wait);
2137 ab->skb = NULL; 2272 ab->skb = NULL;
2138 } 2273
2274 /* setup the netlink header, see the comments in
2275 * kauditd_send_multicast_skb() for length quirks */
2276 nlh = nlmsg_hdr(skb);
2277 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
2278
2279 /* queue the netlink packet and poke the kauditd thread */
2280 skb_queue_tail(&audit_queue, skb);
2281 wake_up_interruptible(&kauditd_wait);
2282 } else
2283 audit_log_lost("rate limit exceeded");
2284
2139 audit_buffer_free(ab); 2285 audit_buffer_free(ab);
2140} 2286}
2141 2287
diff --git a/kernel/audit.h b/kernel/audit.h
index ca579880303a..0d87f8ab8778 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context,
218 struct audit_names *n, const struct path *path, 218 struct audit_names *n, const struct path *path,
219 int record_num, int *call_panic); 219 int record_num, int *call_panic);
220 220
221extern int audit_pid; 221extern int auditd_test_task(const struct task_struct *task);
222 222
223#define AUDIT_INODE_BUCKETS 32 223#define AUDIT_INODE_BUCKETS 32
224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -250,10 +250,6 @@ struct audit_netlink_list {
250 250
251int audit_send_list(void *); 251int audit_send_list(void *);
252 252
253struct audit_net {
254 struct sock *nlsk;
255};
256
257extern int selinux_audit_rule_update(void); 253extern int selinux_audit_rule_update(void);
258 254
259extern struct mutex audit_filter_mutex; 255extern struct mutex audit_filter_mutex;
@@ -337,14 +333,7 @@ extern u32 audit_sig_sid;
337extern int audit_filter(int msgtype, unsigned int listtype); 333extern int audit_filter(int msgtype, unsigned int listtype);
338 334
339#ifdef CONFIG_AUDITSYSCALL 335#ifdef CONFIG_AUDITSYSCALL
340extern int __audit_signal_info(int sig, struct task_struct *t); 336extern int audit_signal_info(int sig, struct task_struct *t);
341static inline int audit_signal_info(int sig, struct task_struct *t)
342{
343 if (unlikely((audit_pid && t->tgid == audit_pid) ||
344 (audit_signals && !audit_dummy_context())))
345 return __audit_signal_info(sig, t);
346 return 0;
347}
348extern void audit_filter_inodes(struct task_struct *, struct audit_context *); 337extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
349extern struct list_head *audit_killed_trees(void); 338extern struct list_head *audit_killed_trees(void);
350#else 339#else
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index d6a8de5f8fa3..1c2333155893 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
762 struct audit_entry *e; 762 struct audit_entry *e;
763 enum audit_state state; 763 enum audit_state state;
764 764
765 if (audit_pid && tsk->tgid == audit_pid) 765 if (auditd_test_task(tsk))
766 return AUDIT_DISABLED; 766 return AUDIT_DISABLED;
767 767
768 rcu_read_lock(); 768 rcu_read_lock();
@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
816{ 816{
817 struct audit_names *n; 817 struct audit_names *n;
818 818
819 if (audit_pid && tsk->tgid == audit_pid) 819 if (auditd_test_task(tsk))
820 return; 820 return;
821 821
822 rcu_read_lock(); 822 rcu_read_lock();
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
2249 * If the audit subsystem is being terminated, record the task (pid) 2249 * If the audit subsystem is being terminated, record the task (pid)
2250 * and uid that is doing that. 2250 * and uid that is doing that.
2251 */ 2251 */
2252int __audit_signal_info(int sig, struct task_struct *t) 2252int audit_signal_info(int sig, struct task_struct *t)
2253{ 2253{
2254 struct audit_aux_data_pids *axp; 2254 struct audit_aux_data_pids *axp;
2255 struct task_struct *tsk = current; 2255 struct task_struct *tsk = current;
2256 struct audit_context *ctx = tsk->audit_context; 2256 struct audit_context *ctx = tsk->audit_context;
2257 kuid_t uid = current_uid(), t_uid = task_uid(t); 2257 kuid_t uid = current_uid(), t_uid = task_uid(t);
2258 2258
2259 if (audit_pid && t->tgid == audit_pid) { 2259 if (auditd_test_task(t) &&
2260 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2260 (sig == SIGTERM || sig == SIGHUP ||
2261 audit_sig_pid = task_tgid_nr(tsk); 2261 sig == SIGUSR1 || sig == SIGUSR2)) {
2262 if (uid_valid(tsk->loginuid)) 2262 audit_sig_pid = task_tgid_nr(tsk);
2263 audit_sig_uid = tsk->loginuid; 2263 if (uid_valid(tsk->loginuid))
2264 else 2264 audit_sig_uid = tsk->loginuid;
2265 audit_sig_uid = uid; 2265 else
2266 security_task_getsecid(tsk, &audit_sig_sid); 2266 audit_sig_uid = uid;
2267 } 2267 security_task_getsecid(tsk, &audit_sig_sid);
2268 if (!audit_signals || audit_dummy_context())
2269 return 0;
2270 } 2268 }
2271 2269
2270 if (!audit_signals || audit_dummy_context())
2271 return 0;
2272
2272 /* optimize the common case by putting first signal recipient directly 2273 /* optimize the common case by putting first signal recipient directly
2273 * in audit_context */ 2274 * in audit_context */
2274 if (!ctx->target_pid) { 2275 if (!ctx->target_pid) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f45827e205d3..b4f1cb0c5ac7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1162,12 +1162,12 @@ out:
1162 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ 1162 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1163 off = IMM; 1163 off = IMM;
1164load_word: 1164load_word:
1165 /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are 1165 /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
1166 * only appearing in the programs where ctx == 1166 * appearing in the programs where ctx == skb
1167 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] 1167 * (see may_access_skb() in the verifier). All programs
1168 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6, 1168 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1169 * internal BPF verifier will check that BPF_R6 == 1169 * bpf_convert_filter() saves it in BPF_R6, internal BPF
1170 * ctx. 1170 * verifier will check that BPF_R6 == ctx.
1171 * 1171 *
1172 * BPF_ABS and BPF_IND are wrappers of function calls, 1172 * BPF_ABS and BPF_IND are wrappers of function calls,
1173 * so they scratch BPF_R1-BPF_R5 registers, preserve 1173 * so they scratch BPF_R1-BPF_R5 registers, preserve
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index afe5bab376c9..361a69dfe543 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -30,18 +30,12 @@ struct bpf_htab {
30 struct pcpu_freelist freelist; 30 struct pcpu_freelist freelist;
31 struct bpf_lru lru; 31 struct bpf_lru lru;
32 }; 32 };
33 void __percpu *extra_elems; 33 struct htab_elem *__percpu *extra_elems;
34 atomic_t count; /* number of elements in this hashtable */ 34 atomic_t count; /* number of elements in this hashtable */
35 u32 n_buckets; /* number of hash buckets */ 35 u32 n_buckets; /* number of hash buckets */
36 u32 elem_size; /* size of each element in bytes */ 36 u32 elem_size; /* size of each element in bytes */
37}; 37};
38 38
39enum extra_elem_state {
40 HTAB_NOT_AN_EXTRA_ELEM = 0,
41 HTAB_EXTRA_ELEM_FREE,
42 HTAB_EXTRA_ELEM_USED
43};
44
45/* each htab element is struct htab_elem + key + value */ 39/* each htab element is struct htab_elem + key + value */
46struct htab_elem { 40struct htab_elem {
47 union { 41 union {
@@ -56,7 +50,6 @@ struct htab_elem {
56 }; 50 };
57 union { 51 union {
58 struct rcu_head rcu; 52 struct rcu_head rcu;
59 enum extra_elem_state state;
60 struct bpf_lru_node lru_node; 53 struct bpf_lru_node lru_node;
61 }; 54 };
62 u32 hash; 55 u32 hash;
@@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
77 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 70 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
78} 71}
79 72
73static bool htab_is_prealloc(const struct bpf_htab *htab)
74{
75 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
76}
77
80static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, 78static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
81 void __percpu *pptr) 79 void __percpu *pptr)
82{ 80{
@@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
128 126
129static int prealloc_init(struct bpf_htab *htab) 127static int prealloc_init(struct bpf_htab *htab)
130{ 128{
129 u32 num_entries = htab->map.max_entries;
131 int err = -ENOMEM, i; 130 int err = -ENOMEM, i;
132 131
133 htab->elems = bpf_map_area_alloc(htab->elem_size * 132 if (!htab_is_percpu(htab) && !htab_is_lru(htab))
134 htab->map.max_entries); 133 num_entries += num_possible_cpus();
134
135 htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
135 if (!htab->elems) 136 if (!htab->elems)
136 return -ENOMEM; 137 return -ENOMEM;
137 138
138 if (!htab_is_percpu(htab)) 139 if (!htab_is_percpu(htab))
139 goto skip_percpu_elems; 140 goto skip_percpu_elems;
140 141
141 for (i = 0; i < htab->map.max_entries; i++) { 142 for (i = 0; i < num_entries; i++) {
142 u32 size = round_up(htab->map.value_size, 8); 143 u32 size = round_up(htab->map.value_size, 8);
143 void __percpu *pptr; 144 void __percpu *pptr;
144 145
@@ -166,11 +167,11 @@ skip_percpu_elems:
166 if (htab_is_lru(htab)) 167 if (htab_is_lru(htab))
167 bpf_lru_populate(&htab->lru, htab->elems, 168 bpf_lru_populate(&htab->lru, htab->elems,
168 offsetof(struct htab_elem, lru_node), 169 offsetof(struct htab_elem, lru_node),
169 htab->elem_size, htab->map.max_entries); 170 htab->elem_size, num_entries);
170 else 171 else
171 pcpu_freelist_populate(&htab->freelist, 172 pcpu_freelist_populate(&htab->freelist,
172 htab->elems + offsetof(struct htab_elem, fnode), 173 htab->elems + offsetof(struct htab_elem, fnode),
173 htab->elem_size, htab->map.max_entries); 174 htab->elem_size, num_entries);
174 175
175 return 0; 176 return 0;
176 177
@@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
191 192
192static int alloc_extra_elems(struct bpf_htab *htab) 193static int alloc_extra_elems(struct bpf_htab *htab)
193{ 194{
194 void __percpu *pptr; 195 struct htab_elem *__percpu *pptr, *l_new;
196 struct pcpu_freelist_node *l;
195 int cpu; 197 int cpu;
196 198
197 pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); 199 pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
200 GFP_USER | __GFP_NOWARN);
198 if (!pptr) 201 if (!pptr)
199 return -ENOMEM; 202 return -ENOMEM;
200 203
201 for_each_possible_cpu(cpu) { 204 for_each_possible_cpu(cpu) {
202 ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = 205 l = pcpu_freelist_pop(&htab->freelist);
203 HTAB_EXTRA_ELEM_FREE; 206 /* pop will succeed, since prealloc_init()
207 * preallocated extra num_possible_cpus elements
208 */
209 l_new = container_of(l, struct htab_elem, fnode);
210 *per_cpu_ptr(pptr, cpu) = l_new;
204 } 211 }
205 htab->extra_elems = pptr; 212 htab->extra_elems = pptr;
206 return 0; 213 return 0;
@@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
342 raw_spin_lock_init(&htab->buckets[i].lock); 349 raw_spin_lock_init(&htab->buckets[i].lock);
343 } 350 }
344 351
345 if (!percpu && !lru) {
346 /* lru itself can remove the least used element, so
347 * there is no need for an extra elem during map_update.
348 */
349 err = alloc_extra_elems(htab);
350 if (err)
351 goto free_buckets;
352 }
353
354 if (prealloc) { 352 if (prealloc) {
355 err = prealloc_init(htab); 353 err = prealloc_init(htab);
356 if (err) 354 if (err)
357 goto free_extra_elems; 355 goto free_buckets;
356
357 if (!percpu && !lru) {
358 /* lru itself can remove the least used element, so
359 * there is no need for an extra elem during map_update.
360 */
361 err = alloc_extra_elems(htab);
362 if (err)
363 goto free_prealloc;
364 }
358 } 365 }
359 366
360 return &htab->map; 367 return &htab->map;
361 368
362free_extra_elems: 369free_prealloc:
363 free_percpu(htab->extra_elems); 370 prealloc_destroy(htab);
364free_buckets: 371free_buckets:
365 bpf_map_area_free(htab->buckets); 372 bpf_map_area_free(htab->buckets);
366free_htab: 373free_htab:
@@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
575 582
576static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 583static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
577{ 584{
578 if (l->state == HTAB_EXTRA_ELEM_USED) { 585 if (htab_is_prealloc(htab)) {
579 l->state = HTAB_EXTRA_ELEM_FREE;
580 return;
581 }
582
583 if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
584 pcpu_freelist_push(&htab->freelist, &l->fnode); 586 pcpu_freelist_push(&htab->freelist, &l->fnode);
585 } else { 587 } else {
586 atomic_dec(&htab->count); 588 atomic_dec(&htab->count);
@@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
610static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 612static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
611 void *value, u32 key_size, u32 hash, 613 void *value, u32 key_size, u32 hash,
612 bool percpu, bool onallcpus, 614 bool percpu, bool onallcpus,
613 bool old_elem_exists) 615 struct htab_elem *old_elem)
614{ 616{
615 u32 size = htab->map.value_size; 617 u32 size = htab->map.value_size;
616 bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); 618 bool prealloc = htab_is_prealloc(htab);
617 struct htab_elem *l_new; 619 struct htab_elem *l_new, **pl_new;
618 void __percpu *pptr; 620 void __percpu *pptr;
619 int err = 0;
620 621
621 if (prealloc) { 622 if (prealloc) {
622 struct pcpu_freelist_node *l; 623 if (old_elem) {
624 /* if we're updating the existing element,
625 * use per-cpu extra elems to avoid freelist_pop/push
626 */
627 pl_new = this_cpu_ptr(htab->extra_elems);
628 l_new = *pl_new;
629 *pl_new = old_elem;
630 } else {
631 struct pcpu_freelist_node *l;
623 632
624 l = pcpu_freelist_pop(&htab->freelist); 633 l = pcpu_freelist_pop(&htab->freelist);
625 if (!l) 634 if (!l)
626 err = -E2BIG; 635 return ERR_PTR(-E2BIG);
627 else
628 l_new = container_of(l, struct htab_elem, fnode); 636 l_new = container_of(l, struct htab_elem, fnode);
629 } else {
630 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
631 atomic_dec(&htab->count);
632 err = -E2BIG;
633 } else {
634 l_new = kmalloc(htab->elem_size,
635 GFP_ATOMIC | __GFP_NOWARN);
636 if (!l_new)
637 return ERR_PTR(-ENOMEM);
638 } 637 }
639 }
640
641 if (err) {
642 if (!old_elem_exists)
643 return ERR_PTR(err);
644
645 /* if we're updating the existing element and the hash table
646 * is full, use per-cpu extra elems
647 */
648 l_new = this_cpu_ptr(htab->extra_elems);
649 if (l_new->state != HTAB_EXTRA_ELEM_FREE)
650 return ERR_PTR(-E2BIG);
651 l_new->state = HTAB_EXTRA_ELEM_USED;
652 } else { 638 } else {
653 l_new->state = HTAB_NOT_AN_EXTRA_ELEM; 639 if (atomic_inc_return(&htab->count) > htab->map.max_entries)
640 if (!old_elem) {
641 /* when map is full and update() is replacing
642 * old element, it's ok to allocate, since
643 * old element will be freed immediately.
644 * Otherwise return an error
645 */
646 atomic_dec(&htab->count);
647 return ERR_PTR(-E2BIG);
648 }
649 l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
650 if (!l_new)
651 return ERR_PTR(-ENOMEM);
654 } 652 }
655 653
656 memcpy(l_new->key, key, key_size); 654 memcpy(l_new->key, key, key_size);
@@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
731 goto err; 729 goto err;
732 730
733 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 731 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
734 !!l_old); 732 l_old);
735 if (IS_ERR(l_new)) { 733 if (IS_ERR(l_new)) {
736 /* all pre-allocated elements are in use or memory exhausted */ 734 /* all pre-allocated elements are in use or memory exhausted */
737 ret = PTR_ERR(l_new); 735 ret = PTR_ERR(l_new);
@@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
744 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 742 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
745 if (l_old) { 743 if (l_old) {
746 hlist_nulls_del_rcu(&l_old->hash_node); 744 hlist_nulls_del_rcu(&l_old->hash_node);
747 free_htab_elem(htab, l_old); 745 if (!htab_is_prealloc(htab))
746 free_htab_elem(htab, l_old);
748 } 747 }
749 ret = 0; 748 ret = 0;
750err: 749err:
@@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
856 value, onallcpus); 855 value, onallcpus);
857 } else { 856 } else {
858 l_new = alloc_htab_elem(htab, key, value, key_size, 857 l_new = alloc_htab_elem(htab, key, value, key_size,
859 hash, true, onallcpus, false); 858 hash, true, onallcpus, NULL);
860 if (IS_ERR(l_new)) { 859 if (IS_ERR(l_new)) {
861 ret = PTR_ERR(l_new); 860 ret = PTR_ERR(l_new);
862 goto err; 861 goto err;
@@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab)
1024 1023
1025 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { 1024 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1026 hlist_nulls_del_rcu(&l->hash_node); 1025 hlist_nulls_del_rcu(&l->hash_node);
1027 if (l->state != HTAB_EXTRA_ELEM_USED) 1026 htab_elem_free(htab, l);
1028 htab_elem_free(htab, l);
1029 } 1027 }
1030 } 1028 }
1031} 1029}
@@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
1045 * not have executed. Wait for them. 1043 * not have executed. Wait for them.
1046 */ 1044 */
1047 rcu_barrier(); 1045 rcu_barrier();
1048 if (htab->map.map_flags & BPF_F_NO_PREALLOC) 1046 if (!htab_is_prealloc(htab))
1049 delete_all_elements(htab); 1047 delete_all_elements(htab);
1050 else 1048 else
1051 prealloc_destroy(htab); 1049 prealloc_destroy(htab);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..821f9e807de5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -617,6 +617,14 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
617 if (insn->imm == BPF_FUNC_xdp_adjust_head) 617 if (insn->imm == BPF_FUNC_xdp_adjust_head)
618 prog->xdp_adjust_head = 1; 618 prog->xdp_adjust_head = 1;
619 if (insn->imm == BPF_FUNC_tail_call) { 619 if (insn->imm == BPF_FUNC_tail_call) {
620 /* If we tail call into other programs, we
621 * cannot make any assumptions since they
622 * can be replaced dynamically during runtime
623 * in the program array.
624 */
625 prog->cb_access = 1;
626 prog->xdp_adjust_head = 1;
627
620 /* mark bpf_tail_call as different opcode 628 /* mark bpf_tail_call as different opcode
621 * to avoid conditional branch in 629 * to avoid conditional branch in
622 * interpeter for every normal call 630 * interpeter for every normal call
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..a834068a400e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
765 } 765 }
766} 766}
767 767
768static int check_ptr_alignment(struct bpf_verifier_env *env, 768static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
769 struct bpf_reg_state *reg, int off, int size) 769 int off, int size)
770{ 770{
771 if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
772 if (off % size != 0) {
773 verbose("misaligned access off %d size %d\n",
774 off, size);
775 return -EACCES;
776 } else {
777 return 0;
778 }
779 }
780
781 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
782 /* misaligned access to packet is ok on x86,arm,arm64 */
783 return 0;
784
785 if (reg->id && size != 1) { 771 if (reg->id && size != 1) {
786 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 772 verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
787 return -EACCES; 773 return -EACCES;
788 } 774 }
789 775
790 /* skb->data is NET_IP_ALIGN-ed */ 776 /* skb->data is NET_IP_ALIGN-ed */
791 if (reg->type == PTR_TO_PACKET && 777 if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
792 (NET_IP_ALIGN + reg->off + off) % size != 0) {
793 verbose("misaligned packet access off %d+%d+%d size %d\n", 778 verbose("misaligned packet access off %d+%d+%d size %d\n",
794 NET_IP_ALIGN, reg->off, off, size); 779 NET_IP_ALIGN, reg->off, off, size);
795 return -EACCES; 780 return -EACCES;
796 } 781 }
782
797 return 0; 783 return 0;
798} 784}
799 785
786static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
787 int size)
788{
789 if (size != 1) {
790 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
791 return -EACCES;
792 }
793
794 return 0;
795}
796
797static int check_ptr_alignment(const struct bpf_reg_state *reg,
798 int off, int size)
799{
800 switch (reg->type) {
801 case PTR_TO_PACKET:
802 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
803 check_pkt_ptr_alignment(reg, off, size);
804 case PTR_TO_MAP_VALUE_ADJ:
805 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
806 check_val_ptr_alignment(reg, size);
807 default:
808 if (off % size != 0) {
809 verbose("misaligned access off %d size %d\n",
810 off, size);
811 return -EACCES;
812 }
813
814 return 0;
815 }
816}
817
800/* check whether memory at (regno + off) is accessible for t = (read | write) 818/* check whether memory at (regno + off) is accessible for t = (read | write)
801 * if t==write, value_regno is a register which value is stored into memory 819 * if t==write, value_regno is a register which value is stored into memory
802 * if t==read, value_regno is a register which will receive the value from memory 820 * if t==read, value_regno is a register which will receive the value from memory
@@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
818 if (size < 0) 836 if (size < 0)
819 return size; 837 return size;
820 838
821 err = check_ptr_alignment(env, reg, off, size); 839 err = check_ptr_alignment(reg, off, size);
822 if (err) 840 if (err)
823 return err; 841 return err;
824 842
@@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1925 * register as unknown. 1943 * register as unknown.
1926 */ 1944 */
1927 if (env->allow_ptr_leaks && 1945 if (env->allow_ptr_leaks &&
1946 BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1928 (dst_reg->type == PTR_TO_MAP_VALUE || 1947 (dst_reg->type == PTR_TO_MAP_VALUE ||
1929 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 1948 dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1930 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 1949 dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
1973 1992
1974 for (i = 0; i < MAX_BPF_REG; i++) 1993 for (i = 0; i < MAX_BPF_REG; i++)
1975 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 1994 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
1976 regs[i].range = dst_reg->off; 1995 /* keep the maximum range already checked */
1996 regs[i].range = max(regs[i].range, dst_reg->off);
1977 1997
1978 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1998 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1979 if (state->stack_slot_type[i] != STACK_SPILL) 1999 if (state->stack_slot_type[i] != STACK_SPILL)
1980 continue; 2000 continue;
1981 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2001 reg = &state->spilled_regs[i / BPF_REG_SIZE];
1982 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2002 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
1983 reg->range = dst_reg->off; 2003 reg->range = max(reg->range, dst_reg->off);
1984 } 2004 }
1985} 2005}
1986 2006
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 48851327a15e..687f5e0194ef 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2425 tsk = tsk->group_leader; 2425 tsk = tsk->group_leader;
2426 2426
2427 /* 2427 /*
2428 * Workqueue threads may acquire PF_NO_SETAFFINITY and become 2428 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2429 * trapped in a cpuset, or RT worker may be born in a cgroup 2429 * If userland migrates such a kthread to a non-root cgroup, it can
2430 * with no rt_runtime allocated. Just say no. 2430 * become trapped in a cpuset, or RT kthread may be born in a
2431 * cgroup with no rt_runtime allocated. Just say no.
2431 */ 2432 */
2432 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { 2433 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2433 ret = -EINVAL; 2434 ret = -EINVAL;
2434 goto out_unlock_rcu; 2435 goto out_unlock_rcu;
2435 } 2436 }
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..e2d356dd7581 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
59struct cpumask * 59struct cpumask *
60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) 60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
61{ 61{
62 int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; 62 int n, nodes, cpus_per_vec, extra_vecs, curvec;
63 int affv = nvecs - affd->pre_vectors - affd->post_vectors; 63 int affv = nvecs - affd->pre_vectors - affd->post_vectors;
64 int last_affv = affv + affd->pre_vectors; 64 int last_affv = affv + affd->pre_vectors;
65 nodemask_t nodemsk = NODE_MASK_NONE; 65 nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
94 goto done; 94 goto done;
95 } 95 }
96 96
97 /* Spread the vectors per node */
98 vecs_per_node = affv / nodes;
99 /* Account for rounding errors */
100 extra_vecs = affv - (nodes * vecs_per_node);
101
102 for_each_node_mask(n, nodemsk) { 97 for_each_node_mask(n, nodemsk) {
103 int ncpus, v, vecs_to_assign = vecs_per_node; 98 int ncpus, v, vecs_to_assign, vecs_per_node;
99
100 /* Spread the vectors per node */
101 vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
104 102
105 /* Get the cpus on this node which are in the mask */ 103 /* Get the cpus on this node which are in the mask */
106 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); 104 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
107 105
108 /* Calculate the number of cpus per vector */ 106 /* Calculate the number of cpus per vector */
109 ncpus = cpumask_weight(nmsk); 107 ncpus = cpumask_weight(nmsk);
108 vecs_to_assign = min(vecs_per_node, ncpus);
109
110 /* Account for rounding errors */
111 extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
110 112
111 for (v = 0; curvec < last_affv && v < vecs_to_assign; 113 for (v = 0; curvec < last_affv && v < vecs_to_assign;
112 curvec++, v++) { 114 curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
115 /* Account for extra vectors to compensate rounding errors */ 117 /* Account for extra vectors to compensate rounding errors */
116 if (extra_vecs) { 118 if (extra_vecs) {
117 cpus_per_vec++; 119 cpus_per_vec++;
118 if (!--extra_vecs) 120 --extra_vecs;
119 vecs_per_node++;
120 } 121 }
121 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); 122 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
122 } 123 }
123 124
124 if (curvec >= last_affv) 125 if (curvec >= last_affv)
125 break; 126 break;
127 --nodes;
126 } 128 }
127 129
128done: 130done:
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2f26adea0f84..26db528c1d88 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/cgroup.h>
23#include <trace/events/sched.h> 24#include <trace/events/sched.h>
24 25
25static DEFINE_SPINLOCK(kthread_create_lock); 26static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
225 226
226 ret = -EINTR; 227 ret = -EINTR;
227 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { 228 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
229 cgroup_kthread_ready();
228 __kthread_parkme(self); 230 __kthread_parkme(self);
229 ret = threadfn(data); 231 ret = threadfn(data);
230 } 232 }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
538 set_mems_allowed(node_states[N_MEMORY]); 540 set_mems_allowed(node_states[N_MEMORY]);
539 541
540 current->flags |= PF_NOFREEZE; 542 current->flags |= PF_NOFREEZE;
543 cgroup_init_kthreadd();
541 544
542 for (;;) { 545 for (;;) {
543 set_current_state(TASK_INTERRUPTIBLE); 546 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index c2b88490d857..c08fbd2f5ba9 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -46,13 +46,13 @@ enum {
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47 47
48/* 48/*
49 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, 49 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
50 * .data and .bss to fit in required 32MB limit for the kernel. With 50 * .data and .bss to fit in required 32MB limit for the kernel. With
51 * PROVE_LOCKING we could go over this limit and cause system boot-up problems. 51 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
52 * So, reduce the static allocations for lockdeps related structures so that 52 * So, reduce the static allocations for lockdeps related structures so that
53 * everything fits in current required size limit. 53 * everything fits in current required size limit.
54 */ 54 */
55#ifdef CONFIG_PROVE_LOCKING_SMALL 55#ifdef CONFIG_LOCKDEP_SMALL
56/* 56/*
57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
58 * we track. 58 * we track.
diff --git a/kernel/padata.c b/kernel/padata.c
index 05316c9f32da..3202aa17492c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
186 186
187 reorder = &next_queue->reorder; 187 reorder = &next_queue->reorder;
188 188
189 spin_lock(&reorder->lock);
189 if (!list_empty(&reorder->list)) { 190 if (!list_empty(&reorder->list)) {
190 padata = list_entry(reorder->list.next, 191 padata = list_entry(reorder->list.next,
191 struct padata_priv, list); 192 struct padata_priv, list);
192 193
193 spin_lock(&reorder->lock);
194 list_del_init(&padata->list); 194 list_del_init(&padata->list);
195 atomic_dec(&pd->reorder_objects); 195 atomic_dec(&pd->reorder_objects);
196 spin_unlock(&reorder->lock);
197 196
198 pd->processed++; 197 pd->processed++;
199 198
199 spin_unlock(&reorder->lock);
200 goto out; 200 goto out;
201 } 201 }
202 spin_unlock(&reorder->lock);
202 203
203 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
204 padata = ERR_PTR(-ENODATA); 205 padata = ERR_PTR(-ENODATA);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
184 184
185 WARN_ON(!task->ptrace || task->parent != current); 185 WARN_ON(!task->ptrace || task->parent != current);
186 186
187 /*
188 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
189 * Recheck state under the lock to close this race.
190 */
187 spin_lock_irq(&task->sighand->siglock); 191 spin_lock_irq(&task->sighand->siglock);
188 if (__fatal_signal_pending(task)) 192 if (task->state == __TASK_TRACED) {
189 wake_up_state(task, __TASK_TRACED); 193 if (__fatal_signal_pending(task))
190 else 194 wake_up_state(task, __TASK_TRACED);
191 task->state = TASK_TRACED; 195 else
196 task->state = TASK_TRACED;
197 }
192 spin_unlock_irq(&task->sighand->siglock); 198 spin_unlock_irq(&task->sighand->siglock);
193} 199}
194 200
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..00a45c45beca 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
96static int __sched_clock_stable_early = 1; 96static int __sched_clock_stable_early = 1;
97 97
98/* 98/*
99 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset 99 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
100 */ 100 */
101static __read_mostly u64 raw_offset; 101__read_mostly u64 __sched_clock_offset;
102static __read_mostly u64 gtod_offset; 102static __read_mostly u64 __gtod_offset;
103 103
104struct sched_clock_data { 104struct sched_clock_data {
105 u64 tick_raw; 105 u64 tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
131 /* 131 /*
132 * Attempt to make the (initial) unstable->stable transition continuous. 132 * Attempt to make the (initial) unstable->stable transition continuous.
133 */ 133 */
134 raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); 134 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
135 135
136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", 136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
137 scd->tick_gtod, gtod_offset, 137 scd->tick_gtod, __gtod_offset,
138 scd->tick_raw, raw_offset); 138 scd->tick_raw, __sched_clock_offset);
139 139
140 static_branch_enable(&__sched_clock_stable); 140 static_branch_enable(&__sched_clock_stable);
141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); 141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
142} 142}
143 143
144static void __clear_sched_clock_stable(struct work_struct *work) 144static void __sched_clock_work(struct work_struct *work)
145{
146 static_branch_disable(&__sched_clock_stable);
147}
148
149static DECLARE_WORK(sched_clock_work, __sched_clock_work);
150
151static void __clear_sched_clock_stable(void)
145{ 152{
146 struct sched_clock_data *scd = this_scd(); 153 struct sched_clock_data *scd = this_scd();
147 154
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
154 * 161 *
155 * Still do what we can. 162 * Still do what we can.
156 */ 163 */
157 gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); 164 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
158 165
159 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", 166 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
160 scd->tick_gtod, gtod_offset, 167 scd->tick_gtod, __gtod_offset,
161 scd->tick_raw, raw_offset); 168 scd->tick_raw, __sched_clock_offset);
162 169
163 static_branch_disable(&__sched_clock_stable);
164 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); 170 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
165}
166 171
167static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable); 172 if (sched_clock_stable())
173 schedule_work(&sched_clock_work);
174}
168 175
169void clear_sched_clock_stable(void) 176void clear_sched_clock_stable(void)
170{ 177{
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
173 smp_mb(); /* matches sched_clock_init_late() */ 180 smp_mb(); /* matches sched_clock_init_late() */
174 181
175 if (sched_clock_running == 2) 182 if (sched_clock_running == 2)
176 schedule_work(&sched_clock_work); 183 __clear_sched_clock_stable();
177} 184}
178 185
179void sched_clock_init_late(void) 186void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
214 */ 221 */
215static u64 sched_clock_local(struct sched_clock_data *scd) 222static u64 sched_clock_local(struct sched_clock_data *scd)
216{ 223{
217 u64 now, clock, old_clock, min_clock, max_clock; 224 u64 now, clock, old_clock, min_clock, max_clock, gtod;
218 s64 delta; 225 s64 delta;
219 226
220again: 227again:
@@ -231,9 +238,10 @@ again:
231 * scd->tick_gtod + TICK_NSEC); 238 * scd->tick_gtod + TICK_NSEC);
232 */ 239 */
233 240
234 clock = scd->tick_gtod + gtod_offset + delta; 241 gtod = scd->tick_gtod + __gtod_offset;
235 min_clock = wrap_max(scd->tick_gtod, old_clock); 242 clock = gtod + delta;
236 max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); 243 min_clock = wrap_max(gtod, old_clock);
244 max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
237 245
238 clock = wrap_max(clock, min_clock); 246 clock = wrap_max(clock, min_clock);
239 clock = wrap_min(clock, max_clock); 247 clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
317 u64 clock; 325 u64 clock;
318 326
319 if (sched_clock_stable()) 327 if (sched_clock_stable())
320 return sched_clock() + raw_offset; 328 return sched_clock() + __sched_clock_offset;
321 329
322 if (unlikely(!sched_clock_running)) 330 if (unlikely(!sched_clock_running))
323 return 0ull; 331 return 0ull;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cd7cd489f739..54c577578da6 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
584 for_each_cpu(cpu, policy->cpus) { 584 for_each_cpu(cpu, policy->cpus) {
585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
586 586
587 memset(sg_cpu, 0, sizeof(*sg_cpu));
587 sg_cpu->sg_policy = sg_policy; 588 sg_cpu->sg_policy = sg_policy;
588 if (policy_is_shared(policy)) { 589 sg_cpu->flags = SCHED_CPUFREQ_RT;
589 sg_cpu->util = 0; 590 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
590 sg_cpu->max = 0; 591 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
591 sg_cpu->flags = SCHED_CPUFREQ_RT; 592 policy_is_shared(policy) ?
592 sg_cpu->last_update = 0; 593 sugov_update_shared :
593 sg_cpu->iowait_boost = 0; 594 sugov_update_single);
594 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
595 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
596 sugov_update_shared);
597 } else {
598 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
599 sugov_update_single);
600 }
601 } 595 }
602 return 0; 596 return 0;
603} 597}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acf0a5a06da7..8c8714fcb53c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
2133 if (write) { 2133 if (write) {
2134 if (*negp) 2134 if (*negp)
2135 return -EINVAL; 2135 return -EINVAL;
2136 if (*lvalp > UINT_MAX)
2137 return -EINVAL;
2136 *valp = *lvalp; 2138 *valp = *lvalp;
2137 } else { 2139 } else {
2138 unsigned int val = *valp; 2140 unsigned int val = *valp;
2141 *negp = false;
2139 *lvalp = (unsigned long)val; 2142 *lvalp = (unsigned long)val;
2140 } 2143 }
2141 return 0; 2144 return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b9691ee8f6c1..dd3e91d68dc7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3755 ftrace_probe_registered = 1; 3755 ftrace_probe_registered = 1;
3756} 3756}
3757 3757
3758static void __disable_ftrace_function_probe(void) 3758static bool __disable_ftrace_function_probe(void)
3759{ 3759{
3760 int i; 3760 int i;
3761 3761
3762 if (!ftrace_probe_registered) 3762 if (!ftrace_probe_registered)
3763 return; 3763 return false;
3764 3764
3765 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3765 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3766 struct hlist_head *hhd = &ftrace_func_hash[i]; 3766 struct hlist_head *hhd = &ftrace_func_hash[i];
3767 if (hhd->first) 3767 if (hhd->first)
3768 return; 3768 return false;
3769 } 3769 }
3770 3770
3771 /* no more funcs left */ 3771 /* no more funcs left */
3772 ftrace_shutdown(&trace_probe_ops, 0); 3772 ftrace_shutdown(&trace_probe_ops, 0);
3773 3773
3774 ftrace_probe_registered = 0; 3774 ftrace_probe_registered = 0;
3775 return true;
3775} 3776}
3776 3777
3777 3778
@@ -3901,6 +3902,7 @@ static void
3901__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3902__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3902 void *data, int flags) 3903 void *data, int flags)
3903{ 3904{
3905 struct ftrace_ops_hash old_hash_ops;
3904 struct ftrace_func_entry *rec_entry; 3906 struct ftrace_func_entry *rec_entry;
3905 struct ftrace_func_probe *entry; 3907 struct ftrace_func_probe *entry;
3906 struct ftrace_func_probe *p; 3908 struct ftrace_func_probe *p;
@@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3912 struct hlist_node *tmp; 3914 struct hlist_node *tmp;
3913 char str[KSYM_SYMBOL_LEN]; 3915 char str[KSYM_SYMBOL_LEN];
3914 int i, ret; 3916 int i, ret;
3917 bool disabled;
3915 3918
3916 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3919 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3917 func_g.search = NULL; 3920 func_g.search = NULL;
@@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3930 3933
3931 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3934 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3932 3935
3936 old_hash_ops.filter_hash = old_hash;
3937 /* Probes only have filters */
3938 old_hash_ops.notrace_hash = NULL;
3939
3933 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3940 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3934 if (!hash) 3941 if (!hash)
3935 /* Hmm, should report this somehow */ 3942 /* Hmm, should report this somehow */
@@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3967 } 3974 }
3968 } 3975 }
3969 mutex_lock(&ftrace_lock); 3976 mutex_lock(&ftrace_lock);
3970 __disable_ftrace_function_probe(); 3977 disabled = __disable_ftrace_function_probe();
3971 /* 3978 /*
3972 * Remove after the disable is called. Otherwise, if the last 3979 * Remove after the disable is called. Otherwise, if the last
3973 * probe is removed, a null hash means *all enabled*. 3980 * probe is removed, a null hash means *all enabled*.
3974 */ 3981 */
3975 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3982 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3983
3984 /* still need to update the function call sites */
3985 if (ftrace_enabled && !disabled)
3986 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3987 &old_hash_ops);
3976 synchronize_sched(); 3988 synchronize_sched();
3977 if (!ret) 3989 if (!ret)
3978 free_ftrace_hash_rcu(old_hash); 3990 free_ftrace_hash_rcu(old_hash);
@@ -5554,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
5554 trace_free_pid_list(pid_list); 5566 trace_free_pid_list(pid_list);
5555} 5567}
5556 5568
5569void ftrace_clear_pids(struct trace_array *tr)
5570{
5571 mutex_lock(&ftrace_lock);
5572
5573 clear_ftrace_pids(tr);
5574
5575 mutex_unlock(&ftrace_lock);
5576}
5577
5557static void ftrace_pid_reset(struct trace_array *tr) 5578static void ftrace_pid_reset(struct trace_array *tr)
5558{ 5579{
5559 mutex_lock(&ftrace_lock); 5580 mutex_lock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 96fc3c043ad6..ca47a4fa2986 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3405int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 3405int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3406{ 3406{
3407 struct ring_buffer_per_cpu *cpu_buffer; 3407 struct ring_buffer_per_cpu *cpu_buffer;
3408 struct buffer_page *reader;
3409 struct buffer_page *head_page;
3410 struct buffer_page *commit_page;
3411 unsigned commit;
3408 3412
3409 cpu_buffer = iter->cpu_buffer; 3413 cpu_buffer = iter->cpu_buffer;
3410 3414
3411 return iter->head_page == cpu_buffer->commit_page && 3415 /* Remember, trace recording is off when iterator is in use */
3412 iter->head == rb_commit_index(cpu_buffer); 3416 reader = cpu_buffer->reader_page;
3417 head_page = cpu_buffer->head_page;
3418 commit_page = cpu_buffer->commit_page;
3419 commit = rb_page_commit(commit_page);
3420
3421 return ((iter->head_page == commit_page && iter->head == commit) ||
3422 (iter->head_page == reader && commit_page == head_page &&
3423 head_page->read == commit &&
3424 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3413} 3425}
3414EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 3426EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3415 3427
@@ -4826,9 +4838,9 @@ static __init int test_ringbuffer(void)
4826 rb_data[cpu].cnt = cpu; 4838 rb_data[cpu].cnt = cpu;
4827 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], 4839 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4828 "rbtester/%d", cpu); 4840 "rbtester/%d", cpu);
4829 if (WARN_ON(!rb_threads[cpu])) { 4841 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4830 pr_cont("FAILED\n"); 4842 pr_cont("FAILED\n");
4831 ret = -1; 4843 ret = PTR_ERR(rb_threads[cpu]);
4832 goto out_free; 4844 goto out_free;
4833 } 4845 }
4834 4846
@@ -4838,9 +4850,9 @@ static __init int test_ringbuffer(void)
4838 4850
4839 /* Now create the rb hammer! */ 4851 /* Now create the rb hammer! */
4840 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); 4852 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4841 if (WARN_ON(!rb_hammer)) { 4853 if (WARN_ON(IS_ERR(rb_hammer))) {
4842 pr_cont("FAILED\n"); 4854 pr_cont("FAILED\n");
4843 ret = -1; 4855 ret = PTR_ERR(rb_hammer);
4844 goto out_free; 4856 goto out_free;
4845 } 4857 }
4846 4858
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f35109514a01..0ad75e9698f6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6733,11 +6733,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6733 return ret; 6733 return ret;
6734 6734
6735 out_reg: 6735 out_reg:
6736 ret = register_ftrace_function_probe(glob, ops, count); 6736 ret = alloc_snapshot(&global_trace);
6737 if (ret < 0)
6738 goto out;
6737 6739
6738 if (ret >= 0) 6740 ret = register_ftrace_function_probe(glob, ops, count);
6739 alloc_snapshot(&global_trace);
6740 6741
6742 out:
6741 return ret < 0 ? ret : 0; 6743 return ret < 0 ? ret : 0;
6742} 6744}
6743 6745
@@ -7402,6 +7404,7 @@ static int instance_rmdir(const char *name)
7402 7404
7403 tracing_set_nop(tr); 7405 tracing_set_nop(tr);
7404 event_trace_del_tracer(tr); 7406 event_trace_del_tracer(tr);
7407 ftrace_clear_pids(tr);
7405 ftrace_destroy_function_files(tr); 7408 ftrace_destroy_function_files(tr);
7406 tracefs_remove_recursive(tr->dir); 7409 tracefs_remove_recursive(tr->dir);
7407 free_trace_buffers(tr); 7410 free_trace_buffers(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ae1cce91fead..d19d52d600d6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
896void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); 896void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
897void ftrace_init_tracefs_toplevel(struct trace_array *tr, 897void ftrace_init_tracefs_toplevel(struct trace_array *tr,
898 struct dentry *d_tracer); 898 struct dentry *d_tracer);
899void ftrace_clear_pids(struct trace_array *tr);
899#else 900#else
900static inline int ftrace_trace_task(struct trace_array *tr) 901static inline int ftrace_trace_task(struct trace_array *tr)
901{ 902{
@@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
914static inline void ftrace_reset_array_ops(struct trace_array *tr) { } 915static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
915static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } 916static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
916static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } 917static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
918static inline void ftrace_clear_pids(struct trace_array *tr) { }
917/* ftace_func_t type is not defined, use macro instead of static inline */ 919/* ftace_func_t type is not defined, use macro instead of static inline */
918#define ftrace_init_array_ops(tr, func) do { } while (0) 920#define ftrace_init_array_ops(tr, func) do { } while (0)
919#endif /* CONFIG_FUNCTION_TRACER */ 921#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..fa16c0f82d6e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
1103 1103
1104 For more details, see Documentation/locking/lockdep-design.txt. 1104 For more details, see Documentation/locking/lockdep-design.txt.
1105 1105
1106config PROVE_LOCKING_SMALL
1107 bool
1108
1109config LOCKDEP 1106config LOCKDEP
1110 bool 1107 bool
1111 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1108 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
1114 select KALLSYMS 1111 select KALLSYMS
1115 select KALLSYMS_ALL 1112 select KALLSYMS_ALL
1116 1113
1114config LOCKDEP_SMALL
1115 bool
1116
1117config LOCK_STAT 1117config LOCK_STAT
1118 bool "Lock usage statistics" 1118 bool "Lock usage statistics"
1119 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1119 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..60abc44385b7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
786} 786}
787EXPORT_SYMBOL(iov_iter_advance); 787EXPORT_SYMBOL(iov_iter_advance);
788 788
789void iov_iter_revert(struct iov_iter *i, size_t unroll)
790{
791 if (!unroll)
792 return;
793 i->count += unroll;
794 if (unlikely(i->type & ITER_PIPE)) {
795 struct pipe_inode_info *pipe = i->pipe;
796 int idx = i->idx;
797 size_t off = i->iov_offset;
798 while (1) {
799 size_t n = off - pipe->bufs[idx].offset;
800 if (unroll < n) {
801 off -= (n - unroll);
802 break;
803 }
804 unroll -= n;
805 if (!unroll && idx == i->start_idx) {
806 off = 0;
807 break;
808 }
809 if (!idx--)
810 idx = pipe->buffers - 1;
811 off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
812 }
813 i->iov_offset = off;
814 i->idx = idx;
815 pipe_truncate(i);
816 return;
817 }
818 if (unroll <= i->iov_offset) {
819 i->iov_offset -= unroll;
820 return;
821 }
822 unroll -= i->iov_offset;
823 if (i->type & ITER_BVEC) {
824 const struct bio_vec *bvec = i->bvec;
825 while (1) {
826 size_t n = (--bvec)->bv_len;
827 i->nr_segs++;
828 if (unroll <= n) {
829 i->bvec = bvec;
830 i->iov_offset = n - unroll;
831 return;
832 }
833 unroll -= n;
834 }
835 } else { /* same logics for iovec and kvec */
836 const struct iovec *iov = i->iov;
837 while (1) {
838 size_t n = (--iov)->iov_len;
839 i->nr_segs++;
840 if (unroll <= n) {
841 i->iov = iov;
842 i->iov_offset = n - unroll;
843 return;
844 }
845 unroll -= n;
846 }
847 }
848}
849EXPORT_SYMBOL(iov_iter_revert);
850
789/* 851/*
790 * Return the count of just the current iov_iter segment. 852 * Return the count of just the current iov_iter segment.
791 */ 853 */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
839 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); 901 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
840 i->iov_offset = 0; 902 i->iov_offset = 0;
841 i->count = count; 903 i->count = count;
904 i->start_idx = i->idx;
842} 905}
843EXPORT_SYMBOL(iov_iter_pipe); 906EXPORT_SYMBOL(iov_iter_pipe);
844 907
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5fa6a3..2c6cd1b5c3ea 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
12 12
13 if (!try_get_task_stack(target)) { 13 if (!try_get_task_stack(target)) {
14 /* Task has no stack, so the task isn't in a syscall. */ 14 /* Task has no stack, so the task isn't in a syscall. */
15 *sp = *pc = 0;
15 *callno = -1; 16 *callno = -1;
16 return 0; 17 return 0;
17 } 18 }
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d3140fbb8..a25c9763fce1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kasan.h>
23 24
24/* 25/*
25 * Note: test functions are marked noinline so that their names appear in 26 * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
474 475
475static int __init kmalloc_tests_init(void) 476static int __init kmalloc_tests_init(void)
476{ 477{
478 /*
479 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
480 * report for the first case.
481 */
482 bool multishot = kasan_save_enable_multi_shot();
483
477 kmalloc_oob_right(); 484 kmalloc_oob_right();
478 kmalloc_oob_left(); 485 kmalloc_oob_left();
479 kmalloc_node_oob_right(); 486 kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
499 ksize_unpoisons_memory(); 506 ksize_unpoisons_memory();
500 copy_user_test(); 507 copy_user_test();
501 use_after_scope_test(); 508 use_after_scope_test();
509
510 kasan_restore_multi_shot(multishot);
511
502 return -EAGAIN; 512 return -EAGAIN;
503} 513}
504 514
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..f3c4f9d22821 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
242 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 242 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
243 } else if (!memcmp("defer", buf,
244 min(sizeof("defer")-1, count))) {
245 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
248 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
249 } else if (!memcmp("defer+madvise", buf, 243 } else if (!memcmp("defer+madvise", buf,
250 min(sizeof("defer+madvise")-1, count))) { 244 min(sizeof("defer+madvise")-1, count))) {
251 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 245 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
254 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 248 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
249 } else if (!memcmp("defer", buf,
250 min(sizeof("defer")-1, count))) {
251 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
254 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
255 } else if (!memcmp("madvise", buf, 255 } else if (!memcmp("madvise", buf,
256 min(sizeof("madvise")-1, count))) { 256 min(sizeof("madvise")-1, count))) {
257 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 257 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1568 deactivate_page(page); 1568 deactivate_page(page);
1569 1569
1570 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1570 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1571 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1571 pmdp_invalidate(vma, addr, pmd);
1572 tlb->fullmm);
1573 orig_pmd = pmd_mkold(orig_pmd); 1572 orig_pmd = pmd_mkold(orig_pmd);
1574 orig_pmd = pmd_mkclean(orig_pmd); 1573 orig_pmd = pmd_mkclean(orig_pmd);
1575 1574
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1724{ 1723{
1725 struct mm_struct *mm = vma->vm_mm; 1724 struct mm_struct *mm = vma->vm_mm;
1726 spinlock_t *ptl; 1725 spinlock_t *ptl;
1727 int ret = 0; 1726 pmd_t entry;
1727 bool preserve_write;
1728 int ret;
1728 1729
1729 ptl = __pmd_trans_huge_lock(pmd, vma); 1730 ptl = __pmd_trans_huge_lock(pmd, vma);
1730 if (ptl) { 1731 if (!ptl)
1731 pmd_t entry; 1732 return 0;
1732 bool preserve_write = prot_numa && pmd_write(*pmd);
1733 ret = 1;
1734 1733
1735 /* 1734 preserve_write = prot_numa && pmd_write(*pmd);
1736 * Avoid trapping faults against the zero page. The read-only 1735 ret = 1;
1737 * data is likely to be read-cached on the local CPU and
1738 * local/remote hits to the zero page are not interesting.
1739 */
1740 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1741 spin_unlock(ptl);
1742 return ret;
1743 }
1744 1736
1745 if (!prot_numa || !pmd_protnone(*pmd)) { 1737 /*
1746 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); 1738 * Avoid trapping faults against the zero page. The read-only
1747 entry = pmd_modify(entry, newprot); 1739 * data is likely to be read-cached on the local CPU and
1748 if (preserve_write) 1740 * local/remote hits to the zero page are not interesting.
1749 entry = pmd_mk_savedwrite(entry); 1741 */
1750 ret = HPAGE_PMD_NR; 1742 if (prot_numa && is_huge_zero_pmd(*pmd))
1751 set_pmd_at(mm, addr, pmd, entry); 1743 goto unlock;
1752 BUG_ON(vma_is_anonymous(vma) && !preserve_write && 1744
1753 pmd_write(entry)); 1745 if (prot_numa && pmd_protnone(*pmd))
1754 } 1746 goto unlock;
1755 spin_unlock(ptl); 1747
1756 } 1748 /*
1749 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1750 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1751 * which is also under down_read(mmap_sem):
1752 *
1753 * CPU0: CPU1:
1754 * change_huge_pmd(prot_numa=1)
1755 * pmdp_huge_get_and_clear_notify()
1756 * madvise_dontneed()
1757 * zap_pmd_range()
1758 * pmd_trans_huge(*pmd) == 0 (without ptl)
1759 * // skip the pmd
1760 * set_pmd_at();
1761 * // pmd is re-established
1762 *
1763 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1764 * which may break userspace.
1765 *
1766 * pmdp_invalidate() is required to make sure we don't miss
1767 * dirty/young flags set by hardware.
1768 */
1769 entry = *pmd;
1770 pmdp_invalidate(vma, addr, pmd);
1757 1771
1772 /*
1773 * Recover dirty/young flags. It relies on pmdp_invalidate to not
1774 * corrupt them.
1775 */
1776 if (pmd_dirty(*pmd))
1777 entry = pmd_mkdirty(entry);
1778 if (pmd_young(*pmd))
1779 entry = pmd_mkyoung(entry);
1780
1781 entry = pmd_modify(entry, newprot);
1782 if (preserve_write)
1783 entry = pmd_mk_savedwrite(entry);
1784 ret = HPAGE_PMD_NR;
1785 set_pmd_at(mm, addr, pmd, entry);
1786 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
1787unlock:
1788 spin_unlock(ptl);
1758 return ret; 1789 return ret;
1759} 1790}
1760 1791
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3d0aab9ee80d..e5828875f7bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
4403 return 0; 4403 return 0;
4404out_err: 4404out_err:
4405 if (!vma || vma->vm_flags & VM_MAYSHARE) 4405 if (!vma || vma->vm_flags & VM_MAYSHARE)
4406 region_abort(resv_map, from, to); 4406 /* Don't call region_abort if region_chg failed */
4407 if (chg >= 0)
4408 region_abort(resv_map, from, to);
4407 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4409 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4408 kref_put(&resv_map->refs, resv_map_release); 4410 kref_put(&resv_map->refs, resv_map_release);
4409 return ret; 4411 return ret;
@@ -4651,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4651{ 4653{
4652 struct page *page = NULL; 4654 struct page *page = NULL;
4653 spinlock_t *ptl; 4655 spinlock_t *ptl;
4656 pte_t pte;
4654retry: 4657retry:
4655 ptl = pmd_lockptr(mm, pmd); 4658 ptl = pmd_lockptr(mm, pmd);
4656 spin_lock(ptl); 4659 spin_lock(ptl);
@@ -4660,12 +4663,13 @@ retry:
4660 */ 4663 */
4661 if (!pmd_huge(*pmd)) 4664 if (!pmd_huge(*pmd))
4662 goto out; 4665 goto out;
4663 if (pmd_present(*pmd)) { 4666 pte = huge_ptep_get((pte_t *)pmd);
4667 if (pte_present(pte)) {
4664 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4668 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4665 if (flags & FOLL_GET) 4669 if (flags & FOLL_GET)
4666 get_page(page); 4670 get_page(page);
4667 } else { 4671 } else {
4668 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { 4672 if (is_hugetlb_entry_migration(pte)) {
4669 spin_unlock(ptl); 4673 spin_unlock(ptl);
4670 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4674 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4671 goto retry; 4675 goto retry;
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
481enum ttu_flags; 481enum ttu_flags;
482struct tlbflush_unmap_batch; 482struct tlbflush_unmap_batch;
483 483
484
485/*
486 * only for MM internal work items which do not depend on
487 * any allocations or locks which might depend on allocations
488 */
489extern struct workqueue_struct *mm_percpu_wq;
490
484#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 491#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
485void try_to_unmap_flush(void); 492void try_to_unmap_flush(void);
486void try_to_unmap_flush_dirty(void); 493void try_to_unmap_flush_dirty(void);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6b3b3c..dd2dea8eb077 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
96 << KASAN_SHADOW_SCALE_SHIFT); 96 << KASAN_SHADOW_SCALE_SHIFT);
97} 97}
98 98
99static inline bool kasan_report_enabled(void)
100{
101 return !current->kasan_depth;
102}
103
104void kasan_report(unsigned long addr, size_t size, 99void kasan_report(unsigned long addr, size_t size,
105 bool is_write, unsigned long ip); 100 bool is_write, unsigned long ip);
106void kasan_report_double_free(struct kmem_cache *cache, void *object, 101void kasan_report_double_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index f479365530b6..ab42a0803f16 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,7 +13,9 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/bitops.h>
16#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/init.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/mm.h> 20#include <linux/mm.h>
19#include <linux/printk.h> 21#include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
293 kasan_end_report(&flags); 295 kasan_end_report(&flags);
294} 296}
295 297
298static unsigned long kasan_flags;
299
300#define KASAN_BIT_REPORTED 0
301#define KASAN_BIT_MULTI_SHOT 1
302
303bool kasan_save_enable_multi_shot(void)
304{
305 return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
306}
307EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
308
309void kasan_restore_multi_shot(bool enabled)
310{
311 if (!enabled)
312 clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
313}
314EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
315
316static int __init kasan_set_multi_shot(char *str)
317{
318 set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
319 return 1;
320}
321__setup("kasan_multi_shot", kasan_set_multi_shot);
322
323static inline bool kasan_report_enabled(void)
324{
325 if (current->kasan_depth)
326 return false;
327 if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
328 return true;
329 return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
330}
331
296void kasan_report(unsigned long addr, size_t size, 332void kasan_report(unsigned long addr, size_t size,
297 bool is_write, unsigned long ip) 333 bool is_write, unsigned long ip)
298{ 334{
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 26c874e90b12..20036d4f9f13 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
1416 /* data/bss scanning */ 1416 /* data/bss scanning */
1417 scan_large_block(_sdata, _edata); 1417 scan_large_block(_sdata, _edata);
1418 scan_large_block(__bss_start, __bss_stop); 1418 scan_large_block(__bss_start, __bss_stop);
1419 scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); 1419 scan_large_block(__start_ro_after_init, __end_ro_after_init);
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422 /* per-cpu sections scanning */ 1422 /* per-cpu sections scanning */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75b2745bac41..37d0b334bfe9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1529COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1529COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1530 compat_ulong_t, maxnode) 1530 compat_ulong_t, maxnode)
1531{ 1531{
1532 long err = 0;
1533 unsigned long __user *nm = NULL; 1532 unsigned long __user *nm = NULL;
1534 unsigned long nr_bits, alloc_size; 1533 unsigned long nr_bits, alloc_size;
1535 DECLARE_BITMAP(bm, MAX_NUMNODES); 1534 DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1538 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1537 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1539 1538
1540 if (nmask) { 1539 if (nmask) {
1541 err = compat_get_bitmap(bm, nmask, nr_bits); 1540 if (compat_get_bitmap(bm, nmask, nr_bits))
1541 return -EFAULT;
1542 nm = compat_alloc_user_space(alloc_size); 1542 nm = compat_alloc_user_space(alloc_size);
1543 err |= copy_to_user(nm, bm, alloc_size); 1543 if (copy_to_user(nm, bm, alloc_size))
1544 return -EFAULT;
1544 } 1545 }
1545 1546
1546 if (err)
1547 return -EFAULT;
1548
1549 return sys_set_mempolicy(mode, nm, nr_bits+1); 1547 return sys_set_mempolicy(mode, nm, nr_bits+1);
1550} 1548}
1551 1549
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1553 compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1551 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1554 compat_ulong_t, maxnode, compat_ulong_t, flags) 1552 compat_ulong_t, maxnode, compat_ulong_t, flags)
1555{ 1553{
1556 long err = 0;
1557 unsigned long __user *nm = NULL; 1554 unsigned long __user *nm = NULL;
1558 unsigned long nr_bits, alloc_size; 1555 unsigned long nr_bits, alloc_size;
1559 nodemask_t bm; 1556 nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1562 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1559 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1563 1560
1564 if (nmask) { 1561 if (nmask) {
1565 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 1562 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1563 return -EFAULT;
1566 nm = compat_alloc_user_space(alloc_size); 1564 nm = compat_alloc_user_space(alloc_size);
1567 err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 1565 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1566 return -EFAULT;
1568 } 1567 }
1569 1568
1570 if (err)
1571 return -EFAULT;
1572
1573 return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 1569 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1574} 1570}
1575 1571
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0897a14d37..738f1d5f8350 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
184 unlock_page(page); 184 unlock_page(page);
185 put_page(page); 185 put_page(page);
186 } else { 186 } else {
187 putback_lru_page(page);
188 dec_node_page_state(page, NR_ISOLATED_ANON + 187 dec_node_page_state(page, NR_ISOLATED_ANON +
189 page_is_file_cache(page)); 188 page_is_file_cache(page));
189 putback_lru_page(page);
190 } 190 }
191 } 191 }
192} 192}
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
209 209
210 VM_BUG_ON_PAGE(PageTail(page), page); 210 VM_BUG_ON_PAGE(PageTail(page), page);
211 while (page_vma_mapped_walk(&pvmw)) { 211 while (page_vma_mapped_walk(&pvmw)) {
212 new = page - pvmw.page->index + 212 if (PageKsm(page))
213 linear_page_index(vma, pvmw.address); 213 new = page;
214 else
215 new = page - pvmw.page->index +
216 linear_page_index(vma, pvmw.address);
214 217
215 get_page(new); 218 get_page(new);
216 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 219 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..07efbc3a8656 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1090{ 1090{
1091 int migratetype = 0; 1091 int migratetype = 0;
1092 int batch_free = 0; 1092 int batch_free = 0;
1093 unsigned long nr_scanned, flags; 1093 unsigned long nr_scanned;
1094 bool isolated_pageblocks; 1094 bool isolated_pageblocks;
1095 1095
1096 spin_lock_irqsave(&zone->lock, flags); 1096 spin_lock(&zone->lock);
1097 isolated_pageblocks = has_isolate_pageblock(zone); 1097 isolated_pageblocks = has_isolate_pageblock(zone);
1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1099 if (nr_scanned) 1099 if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1142 trace_mm_page_pcpu_drain(page, 0, mt); 1142 trace_mm_page_pcpu_drain(page, 0, mt);
1143 } while (--count && --batch_free && !list_empty(list)); 1143 } while (--count && --batch_free && !list_empty(list));
1144 } 1144 }
1145 spin_unlock_irqrestore(&zone->lock, flags); 1145 spin_unlock(&zone->lock);
1146} 1146}
1147 1147
1148static void free_one_page(struct zone *zone, 1148static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
1150 unsigned int order, 1150 unsigned int order,
1151 int migratetype) 1151 int migratetype)
1152{ 1152{
1153 unsigned long nr_scanned, flags; 1153 unsigned long nr_scanned;
1154 spin_lock_irqsave(&zone->lock, flags); 1154 spin_lock(&zone->lock);
1155 __count_vm_events(PGFREE, 1 << order);
1156 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1155 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1157 if (nr_scanned) 1156 if (nr_scanned)
1158 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); 1157 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
1162 migratetype = get_pfnblock_migratetype(page, pfn); 1161 migratetype = get_pfnblock_migratetype(page, pfn);
1163 } 1162 }
1164 __free_one_page(page, pfn, zone, order, migratetype); 1163 __free_one_page(page, pfn, zone, order, migratetype);
1165 spin_unlock_irqrestore(&zone->lock, flags); 1164 spin_unlock(&zone->lock);
1166} 1165}
1167 1166
1168static void __meminit __init_single_page(struct page *page, unsigned long pfn, 1167static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1240 1239
1241static void __free_pages_ok(struct page *page, unsigned int order) 1240static void __free_pages_ok(struct page *page, unsigned int order)
1242{ 1241{
1242 unsigned long flags;
1243 int migratetype; 1243 int migratetype;
1244 unsigned long pfn = page_to_pfn(page); 1244 unsigned long pfn = page_to_pfn(page);
1245 1245
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
1247 return; 1247 return;
1248 1248
1249 migratetype = get_pfnblock_migratetype(page, pfn); 1249 migratetype = get_pfnblock_migratetype(page, pfn);
1250 local_irq_save(flags);
1251 __count_vm_events(PGFREE, 1 << order);
1250 free_one_page(page_zone(page), page, pfn, order, migratetype); 1252 free_one_page(page_zone(page), page, pfn, order, migratetype);
1253 local_irq_restore(flags);
1251} 1254}
1252 1255
1253static void __init __free_pages_boot_core(struct page *page, unsigned int order) 1256static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2219 int migratetype, bool cold) 2222 int migratetype, bool cold)
2220{ 2223{
2221 int i, alloced = 0; 2224 int i, alloced = 0;
2222 unsigned long flags;
2223 2225
2224 spin_lock_irqsave(&zone->lock, flags); 2226 spin_lock(&zone->lock);
2225 for (i = 0; i < count; ++i) { 2227 for (i = 0; i < count; ++i) {
2226 struct page *page = __rmqueue(zone, order, migratetype); 2228 struct page *page = __rmqueue(zone, order, migratetype);
2227 if (unlikely(page == NULL)) 2229 if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2257 * pages added to the pcp list. 2259 * pages added to the pcp list.
2258 */ 2260 */
2259 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2261 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2260 spin_unlock_irqrestore(&zone->lock, flags); 2262 spin_unlock(&zone->lock);
2261 return alloced; 2263 return alloced;
2262} 2264}
2263 2265
@@ -2373,6 +2375,13 @@ void drain_all_pages(struct zone *zone)
2373 */ 2375 */
2374 static cpumask_t cpus_with_pcps; 2376 static cpumask_t cpus_with_pcps;
2375 2377
2378 /*
2379 * Make sure nobody triggers this path before mm_percpu_wq is fully
2380 * initialized.
2381 */
2382 if (WARN_ON_ONCE(!mm_percpu_wq))
2383 return;
2384
2376 /* Workqueues cannot recurse */ 2385 /* Workqueues cannot recurse */
2377 if (current->flags & PF_WQ_WORKER) 2386 if (current->flags & PF_WQ_WORKER)
2378 return; 2387 return;
@@ -2422,7 +2431,7 @@ void drain_all_pages(struct zone *zone)
2422 for_each_cpu(cpu, &cpus_with_pcps) { 2431 for_each_cpu(cpu, &cpus_with_pcps) {
2423 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); 2432 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2424 INIT_WORK(work, drain_local_pages_wq); 2433 INIT_WORK(work, drain_local_pages_wq);
2425 schedule_work_on(cpu, work); 2434 queue_work_on(cpu, mm_percpu_wq, work);
2426 } 2435 }
2427 for_each_cpu(cpu, &cpus_with_pcps) 2436 for_each_cpu(cpu, &cpus_with_pcps)
2428 flush_work(per_cpu_ptr(&pcpu_drain, cpu)); 2437 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -2478,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
2478{ 2487{
2479 struct zone *zone = page_zone(page); 2488 struct zone *zone = page_zone(page);
2480 struct per_cpu_pages *pcp; 2489 struct per_cpu_pages *pcp;
2490 unsigned long flags;
2481 unsigned long pfn = page_to_pfn(page); 2491 unsigned long pfn = page_to_pfn(page);
2482 int migratetype; 2492 int migratetype;
2483 2493
2484 if (in_interrupt()) {
2485 __free_pages_ok(page, 0);
2486 return;
2487 }
2488
2489 if (!free_pcp_prepare(page)) 2494 if (!free_pcp_prepare(page))
2490 return; 2495 return;
2491 2496
2492 migratetype = get_pfnblock_migratetype(page, pfn); 2497 migratetype = get_pfnblock_migratetype(page, pfn);
2493 set_pcppage_migratetype(page, migratetype); 2498 set_pcppage_migratetype(page, migratetype);
2494 preempt_disable(); 2499 local_irq_save(flags);
2500 __count_vm_event(PGFREE);
2495 2501
2496 /* 2502 /*
2497 * We only track unmovable, reclaimable and movable on pcp lists. 2503 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2508,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
2508 migratetype = MIGRATE_MOVABLE; 2514 migratetype = MIGRATE_MOVABLE;
2509 } 2515 }
2510 2516
2511 __count_vm_event(PGFREE);
2512 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2517 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2513 if (!cold) 2518 if (!cold)
2514 list_add(&page->lru, &pcp->lists[migratetype]); 2519 list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2522,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
2522 } 2527 }
2523 2528
2524out: 2529out:
2525 preempt_enable(); 2530 local_irq_restore(flags);
2526} 2531}
2527 2532
2528/* 2533/*
@@ -2647,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2647{ 2652{
2648 struct page *page; 2653 struct page *page;
2649 2654
2650 VM_BUG_ON(in_interrupt());
2651
2652 do { 2655 do {
2653 if (list_empty(list)) { 2656 if (list_empty(list)) {
2654 pcp->count += rmqueue_bulk(zone, 0, 2657 pcp->count += rmqueue_bulk(zone, 0,
@@ -2679,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2679 struct list_head *list; 2682 struct list_head *list;
2680 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2683 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2681 struct page *page; 2684 struct page *page;
2685 unsigned long flags;
2682 2686
2683 preempt_disable(); 2687 local_irq_save(flags);
2684 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2688 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2685 list = &pcp->lists[migratetype]; 2689 list = &pcp->lists[migratetype];
2686 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); 2690 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2688,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2688 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2692 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2689 zone_statistics(preferred_zone, zone); 2693 zone_statistics(preferred_zone, zone);
2690 } 2694 }
2691 preempt_enable(); 2695 local_irq_restore(flags);
2692 return page; 2696 return page;
2693} 2697}
2694 2698
@@ -2704,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
2704 unsigned long flags; 2708 unsigned long flags;
2705 struct page *page; 2709 struct page *page;
2706 2710
2707 if (likely(order == 0) && !in_interrupt()) { 2711 if (likely(order == 0)) {
2708 page = rmqueue_pcplist(preferred_zone, zone, order, 2712 page = rmqueue_pcplist(preferred_zone, zone, order,
2709 gfp_flags, migratetype); 2713 gfp_flags, migratetype);
2710 goto out; 2714 goto out;
@@ -4519,13 +4523,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4519 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4523 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4520 K(node_page_state(pgdat, NR_FILE_DIRTY)), 4524 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4521 K(node_page_state(pgdat, NR_WRITEBACK)), 4525 K(node_page_state(pgdat, NR_WRITEBACK)),
4526 K(node_page_state(pgdat, NR_SHMEM)),
4522#ifdef CONFIG_TRANSPARENT_HUGEPAGE 4527#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4523 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4528 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4524 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4529 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4525 * HPAGE_PMD_NR), 4530 * HPAGE_PMD_NR),
4526 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4531 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4527#endif 4532#endif
4528 K(node_page_state(pgdat, NR_SHMEM)),
4529 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4533 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4530 K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4534 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4531 node_page_state(pgdat, NR_PAGES_SCANNED), 4535 node_page_state(pgdat, NR_PAGES_SCANNED),
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
111 if (pvmw->pmd && !pvmw->pte) 111 if (pvmw->pmd && !pvmw->pte)
112 return not_found(pvmw); 112 return not_found(pvmw);
113 113
114 /* Only for THP, seek to next pte entry makes sense */ 114 if (pvmw->pte)
115 if (pvmw->pte) {
116 if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
117 return not_found(pvmw);
118 goto next_pte; 115 goto next_pte;
119 }
120 116
121 if (unlikely(PageHuge(pvmw->page))) { 117 if (unlikely(PageHuge(pvmw->page))) {
122 /* when pud is not present, pte will be NULL */ 118 /* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
165 while (1) { 161 while (1) {
166 if (check_pte(pvmw)) 162 if (check_pte(pvmw))
167 return true; 163 return true;
168next_pte: do { 164next_pte:
165 /* Seek to next pte only makes sense for THP */
166 if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
167 return not_found(pvmw);
168 do {
169 pvmw->address += PAGE_SIZE; 169 pvmw->address += PAGE_SIZE;
170 if (pvmw->address >= 170 if (pvmw->address >= pvmw->vma->vm_end ||
171 pvmw->address >=
171 __vma_address(pvmw->page, pvmw->vma) + 172 __vma_address(pvmw->page, pvmw->vma) +
172 hpage_nr_pages(pvmw->page) * PAGE_SIZE) 173 hpage_nr_pages(pvmw->page) * PAGE_SIZE)
173 return not_found(pvmw); 174 return not_found(pvmw);
diff --git a/mm/rmap.c b/mm/rmap.c
index 49ed681ccc7b..f6838015810f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
1159 goto out; 1159 goto out;
1160 } 1160 }
1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); 1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
1162 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1162 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
1163out: 1163out:
1164 unlock_page_memcg(page); 1164 unlock_page_memcg(page);
1165} 1165}
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
1199 * pte lock(a spinlock) is held, which implies preemption disabled. 1199 * pte lock(a spinlock) is held, which implies preemption disabled.
1200 */ 1200 */
1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); 1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
1202 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1202 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
1203 1203
1204 if (unlikely(PageMlocked(page))) 1204 if (unlikely(PageMlocked(page)))
1205 clear_page_mlock(page); 1205 clear_page_mlock(page);
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
670 670
671static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); 671static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
672 672
673/*
674 * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
675 * workqueue, aiding in getting memory freed.
676 */
677static struct workqueue_struct *lru_add_drain_wq;
678
679static int __init lru_init(void)
680{
681 lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
682
683 if (WARN(!lru_add_drain_wq,
684 "Failed to create workqueue lru_add_drain_wq"))
685 return -ENOMEM;
686
687 return 0;
688}
689early_initcall(lru_init);
690
691void lru_add_drain_all(void) 673void lru_add_drain_all(void)
692{ 674{
693 static DEFINE_MUTEX(lock); 675 static DEFINE_MUTEX(lock);
694 static struct cpumask has_work; 676 static struct cpumask has_work;
695 int cpu; 677 int cpu;
696 678
679 /*
680 * Make sure nobody triggers this path before mm_percpu_wq is fully
681 * initialized.
682 */
683 if (WARN_ON(!mm_percpu_wq))
684 return;
685
697 mutex_lock(&lock); 686 mutex_lock(&lock);
698 get_online_cpus(); 687 get_online_cpus();
699 cpumask_clear(&has_work); 688 cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
707 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || 696 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
708 need_activate_page_drain(cpu)) { 697 need_activate_page_drain(cpu)) {
709 INIT_WORK(work, lru_add_drain_per_cpu); 698 INIT_WORK(work, lru_add_drain_per_cpu);
710 queue_work_on(cpu, lru_add_drain_wq, work); 699 queue_work_on(cpu, mm_percpu_wq, work);
711 cpumask_set_cpu(cpu, &has_work); 700 cpumask_set_cpu(cpu, &has_work);
712 } 701 }
713 } 702 }
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
201 struct page *page = map[i]; 201 struct page *page = map[i];
202 if (page) 202 if (page)
203 __free_page(page); 203 __free_page(page);
204 if (!(i % SWAP_CLUSTER_MAX))
205 cond_resched();
204 } 206 }
205 vfree(map); 207 vfree(map);
206 } 208 }
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 7ebb23836f68..b1ccb58ad397 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry)
267{ 267{
268 struct swap_slots_cache *cache; 268 struct swap_slots_cache *cache;
269 269
270 WARN_ON_ONCE(!swap_slot_cache_initialized);
271
272 cache = &get_cpu_var(swp_slots); 270 cache = &get_cpu_var(swp_slots);
273 if (use_swap_slot_cache && cache->slots_ret) { 271 if (use_swap_slot_cache && cache->slots_ret) {
274 spin_lock_irq(&cache->free_lock); 272 spin_lock_irq(&cache->free_lock);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b1947f0cbee2..5a4f5c5a31e8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
1552#endif /* CONFIG_PROC_FS */ 1552#endif /* CONFIG_PROC_FS */
1553 1553
1554#ifdef CONFIG_SMP 1554#ifdef CONFIG_SMP
1555static struct workqueue_struct *vmstat_wq;
1556static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1555static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1557int sysctl_stat_interval __read_mostly = HZ; 1556int sysctl_stat_interval __read_mostly = HZ;
1558 1557
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
1623 * to occur in the future. Keep on running the 1622 * to occur in the future. Keep on running the
1624 * update worker thread. 1623 * update worker thread.
1625 */ 1624 */
1626 queue_delayed_work_on(smp_processor_id(), vmstat_wq, 1625 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1627 this_cpu_ptr(&vmstat_work), 1626 this_cpu_ptr(&vmstat_work),
1628 round_jiffies_relative(sysctl_stat_interval)); 1627 round_jiffies_relative(sysctl_stat_interval));
1629 } 1628 }
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
1702 struct delayed_work *dw = &per_cpu(vmstat_work, cpu); 1701 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1703 1702
1704 if (!delayed_work_pending(dw) && need_update(cpu)) 1703 if (!delayed_work_pending(dw) && need_update(cpu))
1705 queue_delayed_work_on(cpu, vmstat_wq, dw, 0); 1704 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1706 } 1705 }
1707 put_online_cpus(); 1706 put_online_cpus();
1708 1707
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
1718 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), 1717 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1719 vmstat_update); 1718 vmstat_update);
1720 1719
1721 vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1722 schedule_delayed_work(&shepherd, 1720 schedule_delayed_work(&shepherd,
1723 round_jiffies_relative(sysctl_stat_interval)); 1721 round_jiffies_relative(sysctl_stat_interval));
1724} 1722}
@@ -1764,11 +1762,15 @@ static int vmstat_cpu_dead(unsigned int cpu)
1764 1762
1765#endif 1763#endif
1766 1764
1767static int __init setup_vmstat(void) 1765struct workqueue_struct *mm_percpu_wq;
1766
1767void __init init_mm_internals(void)
1768{ 1768{
1769#ifdef CONFIG_SMP 1769 int ret __maybe_unused;
1770 int ret; 1770
1771 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1771 1772
1773#ifdef CONFIG_SMP
1772 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", 1774 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1773 NULL, vmstat_cpu_dead); 1775 NULL, vmstat_cpu_dead);
1774 if (ret < 0) 1776 if (ret < 0)
@@ -1792,9 +1794,7 @@ static int __init setup_vmstat(void)
1792 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 1794 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1793 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 1795 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1794#endif 1796#endif
1795 return 0;
1796} 1797}
1797module_init(setup_vmstat)
1798 1798
1799#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1799#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1800 1800
diff --git a/mm/workingset.c b/mm/workingset.c
index ac839fca0e76..eda05c71fa49 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", 532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
533 timestamp_bits, max_order, bucket_order); 533 timestamp_bits, max_order, bucket_order);
534 534
535 ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key); 535 ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
536 if (ret) 536 if (ret)
537 goto err; 537 goto err;
538 ret = register_shrinker(&workingset_shadow_shrinker); 538 ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index f9492bccfd79..54f63c4a809a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
185 spin_lock(&zhdr->page_lock); 185 spin_lock(&zhdr->page_lock);
186} 186}
187 187
188/* Try to lock a z3fold page */
189static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
190{
191 return spin_trylock(&zhdr->page_lock);
192}
193
188/* Unlock a z3fold page */ 194/* Unlock a z3fold page */
189static inline void z3fold_page_unlock(struct z3fold_header *zhdr) 195static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
190{ 196{
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
385 spin_lock(&pool->lock); 391 spin_lock(&pool->lock);
386 zhdr = list_first_entry_or_null(&pool->unbuddied[i], 392 zhdr = list_first_entry_or_null(&pool->unbuddied[i],
387 struct z3fold_header, buddy); 393 struct z3fold_header, buddy);
388 if (!zhdr) { 394 if (!zhdr || !z3fold_page_trylock(zhdr)) {
389 spin_unlock(&pool->lock); 395 spin_unlock(&pool->lock);
390 continue; 396 continue;
391 } 397 }
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
394 spin_unlock(&pool->lock); 400 spin_unlock(&pool->lock);
395 401
396 page = virt_to_page(zhdr); 402 page = virt_to_page(zhdr);
397 z3fold_page_lock(zhdr);
398 if (zhdr->first_chunks == 0) { 403 if (zhdr->first_chunks == 0) {
399 if (zhdr->middle_chunks != 0 && 404 if (zhdr->middle_chunks != 0 &&
400 chunks >= zhdr->start_middle) 405 chunks >= zhdr->start_middle)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b7ee9c34dbd6..d41edd28298b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -276,7 +276,7 @@ struct zs_pool {
276struct zspage { 276struct zspage {
277 struct { 277 struct {
278 unsigned int fullness:FULLNESS_BITS; 278 unsigned int fullness:FULLNESS_BITS;
279 unsigned int class:CLASS_BITS; 279 unsigned int class:CLASS_BITS + 1;
280 unsigned int isolated:ISOLATED_BITS; 280 unsigned int isolated:ISOLATED_BITS;
281 unsigned int magic:MAGIC_VAL_BITS; 281 unsigned int magic:MAGIC_VAL_BITS;
282 }; 282 };
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7c3d994e90d8..71343d0fec94 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
2477 batadv_iv_ogm_schedule(hard_iface); 2477 batadv_iv_ogm_schedule(hard_iface);
2478} 2478}
2479 2479
2480/**
2481 * batadv_iv_init_sel_class - initialize GW selection class
2482 * @bat_priv: the bat priv with all the soft interface information
2483 */
2484static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
2485{
2486 /* set default TQ difference threshold to 20 */
2487 atomic_set(&bat_priv->gw.sel_class, 20);
2488}
2489
2480static struct batadv_gw_node * 2490static struct batadv_gw_node *
2481batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) 2491batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
2482{ 2492{
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
2823 .del_if = batadv_iv_ogm_orig_del_if, 2833 .del_if = batadv_iv_ogm_orig_del_if,
2824 }, 2834 },
2825 .gw = { 2835 .gw = {
2836 .init_sel_class = batadv_iv_init_sel_class,
2826 .get_best_gw_node = batadv_iv_gw_get_best_gw_node, 2837 .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
2827 .is_eligible = batadv_iv_gw_is_eligible, 2838 .is_eligible = batadv_iv_gw_is_eligible,
2828#ifdef CONFIG_BATMAN_ADV_DEBUGFS 2839#ifdef CONFIG_BATMAN_ADV_DEBUGFS
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0acd081dd286..a36c8e7291d6 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -668,6 +668,16 @@ err_ifinfo1:
668 return ret; 668 return ret;
669} 669}
670 670
671/**
672 * batadv_v_init_sel_class - initialize GW selection class
673 * @bat_priv: the bat priv with all the soft interface information
674 */
675static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
676{
677 /* set default throughput difference threshold to 5Mbps */
678 atomic_set(&bat_priv->gw.sel_class, 50);
679}
680
671static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, 681static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
672 char *buff, size_t count) 682 char *buff, size_t count)
673{ 683{
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
1052 .dump = batadv_v_orig_dump, 1062 .dump = batadv_v_orig_dump,
1053 }, 1063 },
1054 .gw = { 1064 .gw = {
1065 .init_sel_class = batadv_v_init_sel_class,
1055 .store_sel_class = batadv_v_store_sel_class, 1066 .store_sel_class = batadv_v_store_sel_class,
1056 .show_sel_class = batadv_v_show_sel_class, 1067 .show_sel_class = batadv_v_show_sel_class,
1057 .get_best_gw_node = batadv_v_gw_get_best_gw_node, 1068 .get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
1092 if (ret < 0) 1103 if (ret < 0)
1093 return ret; 1104 return ret;
1094 1105
1095 /* set default throughput difference threshold to 5Mbps */
1096 atomic_set(&bat_priv->gw.sel_class, 50);
1097
1098 return 0; 1106 return 0;
1099} 1107}
1100 1108
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 11a23fd6e1a0..8f964beaac28 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -404,7 +404,7 @@ out:
404 * batadv_frag_create - create a fragment from skb 404 * batadv_frag_create - create a fragment from skb
405 * @skb: skb to create fragment from 405 * @skb: skb to create fragment from
406 * @frag_head: header to use in new fragment 406 * @frag_head: header to use in new fragment
407 * @mtu: size of new fragment 407 * @fragment_size: size of new fragment
408 * 408 *
409 * Split the passed skb into two fragments: A new one with size matching the 409 * Split the passed skb into two fragments: A new one with size matching the
410 * passed mtu and the old one with the rest. The new skb contains data from the 410 * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ out:
414 */ 414 */
415static struct sk_buff *batadv_frag_create(struct sk_buff *skb, 415static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
416 struct batadv_frag_packet *frag_head, 416 struct batadv_frag_packet *frag_head,
417 unsigned int mtu) 417 unsigned int fragment_size)
418{ 418{
419 struct sk_buff *skb_fragment; 419 struct sk_buff *skb_fragment;
420 unsigned int header_size = sizeof(*frag_head); 420 unsigned int header_size = sizeof(*frag_head);
421 unsigned int fragment_size = mtu - header_size; 421 unsigned int mtu = fragment_size + header_size;
422 422
423 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); 423 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
424 if (!skb_fragment) 424 if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
456 struct sk_buff *skb_fragment; 456 struct sk_buff *skb_fragment;
457 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; 457 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
458 unsigned int header_size = sizeof(frag_header); 458 unsigned int header_size = sizeof(frag_header);
459 unsigned int max_fragment_size, max_packet_size; 459 unsigned int max_fragment_size, num_fragments;
460 int ret; 460 int ret;
461 461
462 /* To avoid merge and refragmentation at next-hops we never send 462 /* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
464 */ 464 */
465 mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); 465 mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
466 max_fragment_size = mtu - header_size; 466 max_fragment_size = mtu - header_size;
467 max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; 467
468 if (skb->len == 0 || max_fragment_size == 0)
469 return -EINVAL;
470
471 num_fragments = (skb->len - 1) / max_fragment_size + 1;
472 max_fragment_size = (skb->len - 1) / num_fragments + 1;
468 473
469 /* Don't even try to fragment, if we need more than 16 fragments */ 474 /* Don't even try to fragment, if we need more than 16 fragments */
470 if (skb->len > max_packet_size) { 475 if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
471 ret = -EAGAIN; 476 ret = -EAGAIN;
472 goto free_skb; 477 goto free_skb;
473 } 478 }
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
507 goto put_primary_if; 512 goto put_primary_if;
508 } 513 }
509 514
510 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 515 skb_fragment = batadv_frag_create(skb, &frag_header,
516 max_fragment_size);
511 if (!skb_fragment) { 517 if (!skb_fragment) {
512 ret = -ENOMEM; 518 ret = -ENOMEM;
513 goto put_primary_if; 519 goto put_primary_if;
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 5db2e43e3775..33940c5c74a8 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
253 */ 253 */
254void batadv_gw_init(struct batadv_priv *bat_priv) 254void batadv_gw_init(struct batadv_priv *bat_priv)
255{ 255{
256 if (bat_priv->algo_ops->gw.init_sel_class)
257 bat_priv->algo_ops->gw.init_sel_class(bat_priv);
258 else
259 atomic_set(&bat_priv->gw.sel_class, 1);
260
256 batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, 261 batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
257 NULL, BATADV_TVLV_GW, 1, 262 NULL, BATADV_TVLV_GW, 1,
258 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 263 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5d099b2e6cfc..d042c99af028 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
819 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); 819 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
820#endif 820#endif
821 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); 821 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
822 atomic_set(&bat_priv->gw.sel_class, 20);
823 atomic_set(&bat_priv->gw.bandwidth_down, 100); 822 atomic_set(&bat_priv->gw.bandwidth_down, 100);
824 atomic_set(&bat_priv->gw.bandwidth_up, 20); 823 atomic_set(&bat_priv->gw.bandwidth_up, 20);
825 atomic_set(&bat_priv->orig_interval, 1000); 824 atomic_set(&bat_priv->orig_interval, 1000);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 66b25e410a41..246f21b4973b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
1489 1489
1490/** 1490/**
1491 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) 1491 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
1492 * @init_sel_class: initialize GW selection class (optional)
1492 * @store_sel_class: parse and stores a new GW selection class (optional) 1493 * @store_sel_class: parse and stores a new GW selection class (optional)
1493 * @show_sel_class: prints the current GW selection class (optional) 1494 * @show_sel_class: prints the current GW selection class (optional)
1494 * @get_best_gw_node: select the best GW from the list of available nodes 1495 * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
1499 * @dump: dump gateways to a netlink socket (optional) 1500 * @dump: dump gateways to a netlink socket (optional)
1500 */ 1501 */
1501struct batadv_algo_gw_ops { 1502struct batadv_algo_gw_ops {
1503 void (*init_sel_class)(struct batadv_priv *bat_priv);
1502 ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, 1504 ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
1503 size_t count); 1505 size_t count);
1504 ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); 1506 ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ea71513fca21..90f49a194249 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
119 return err; 119 return err;
120} 120}
121 121
122static void br_dev_uninit(struct net_device *dev)
123{
124 struct net_bridge *br = netdev_priv(dev);
125
126 br_multicast_uninit_stats(br);
127 br_vlan_flush(br);
128 free_percpu(br->stats);
129}
130
122static int br_dev_open(struct net_device *dev) 131static int br_dev_open(struct net_device *dev)
123{ 132{
124 struct net_bridge *br = netdev_priv(dev); 133 struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
332 .ndo_open = br_dev_open, 341 .ndo_open = br_dev_open,
333 .ndo_stop = br_dev_stop, 342 .ndo_stop = br_dev_stop,
334 .ndo_init = br_dev_init, 343 .ndo_init = br_dev_init,
344 .ndo_uninit = br_dev_uninit,
335 .ndo_start_xmit = br_dev_xmit, 345 .ndo_start_xmit = br_dev_xmit,
336 .ndo_get_stats64 = br_get_stats64, 346 .ndo_get_stats64 = br_get_stats64,
337 .ndo_set_mac_address = br_set_mac_address, 347 .ndo_set_mac_address = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
356 .ndo_features_check = passthru_features_check, 366 .ndo_features_check = passthru_features_check,
357}; 367};
358 368
359static void br_dev_free(struct net_device *dev)
360{
361 struct net_bridge *br = netdev_priv(dev);
362
363 free_percpu(br->stats);
364 free_netdev(dev);
365}
366
367static struct device_type br_type = { 369static struct device_type br_type = {
368 .name = "bridge", 370 .name = "bridge",
369}; 371};
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
376 ether_setup(dev); 378 ether_setup(dev);
377 379
378 dev->netdev_ops = &br_netdev_ops; 380 dev->netdev_ops = &br_netdev_ops;
379 dev->destructor = br_dev_free; 381 dev->destructor = free_netdev;
380 dev->ethtool_ops = &br_ethtool_ops; 382 dev->ethtool_ops = &br_ethtool_ops;
381 SET_NETDEV_DEVTYPE(dev, &br_type); 383 SET_NETDEV_DEVTYPE(dev, &br_type);
382 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; 384 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4f598dc2d916..6e08b7199dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
106 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 106 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
107 struct net_bridge_fdb_entry *fdb; 107 struct net_bridge_fdb_entry *fdb;
108 108
109 WARN_ON_ONCE(!br_hash_lock_held(br)); 109 lockdep_assert_held_once(&br->hash_lock);
110 110
111 rcu_read_lock(); 111 rcu_read_lock();
112 fdb = fdb_find_rcu(head, addr, vid); 112 fdb = fdb_find_rcu(head, addr, vid);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 8ac1770aa222..56a2a72e7738 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
311 311
312 br_fdb_delete_by_port(br, NULL, 0, 1); 312 br_fdb_delete_by_port(br, NULL, 0, 1);
313 313
314 br_vlan_flush(br);
315 br_multicast_dev_del(br); 314 br_multicast_dev_del(br);
316 cancel_delayed_work_sync(&br->gc_work); 315 cancel_delayed_work_sync(&br->gc_work);
317 316
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b760f2620abf..faa7261a992f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
2031 2031
2032out: 2032out:
2033 spin_unlock_bh(&br->multicast_lock); 2033 spin_unlock_bh(&br->multicast_lock);
2034
2035 free_percpu(br->mcast_stats);
2036} 2034}
2037 2035
2038int br_multicast_set_router(struct net_bridge *br, unsigned long val) 2036int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
2531 return 0; 2529 return 0;
2532} 2530}
2533 2531
2532void br_multicast_uninit_stats(struct net_bridge *br)
2533{
2534 free_percpu(br->mcast_stats);
2535}
2536
2534static void mcast_stats_add_dir(u64 *dst, u64 *src) 2537static void mcast_stats_add_dir(u64 *dst, u64 *src)
2535{ 2538{
2536 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2539 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index fa87fbd62bb7..1f1e62095464 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
706 706
707static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 707static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
708{ 708{
709 struct nf_bridge_info *nf_bridge; 709 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
710 unsigned int mtu_reserved; 710 unsigned int mtu, mtu_reserved;
711 711
712 mtu_reserved = nf_bridge_mtu_reduction(skb); 712 mtu_reserved = nf_bridge_mtu_reduction(skb);
713 mtu = skb->dev->mtu;
714
715 if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
716 mtu = nf_bridge->frag_max_size;
713 717
714 if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { 718 if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
715 nf_bridge_info_free(skb); 719 nf_bridge_info_free(skb);
716 return br_dev_queue_push_xmit(net, sk, skb); 720 return br_dev_queue_push_xmit(net, sk, skb);
717 } 721 }
718 722
719 nf_bridge = nf_bridge_info_get(skb);
720
721 /* This is wrong! We should preserve the original fragment 723 /* This is wrong! We should preserve the original fragment
722 * boundaries by preserving frag_list rather than refragmenting. 724 * boundaries by preserving frag_list rather than refragmenting.
723 */ 725 */
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a8f6acd23e30..225ef7d53701 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1165 spin_unlock_bh(&br->lock); 1165 spin_unlock_bh(&br->lock);
1166 } 1166 }
1167 1167
1168 err = br_changelink(dev, tb, data); 1168 err = register_netdevice(dev);
1169 if (err) 1169 if (err)
1170 return err; 1170 return err;
1171 1171
1172 return register_netdevice(dev); 1172 err = br_changelink(dev, tb, data);
1173 if (err)
1174 unregister_netdevice(dev);
1175 return err;
1173} 1176}
1174 1177
1175static size_t br_get_size(const struct net_device *brdev) 1178static size_t br_get_size(const struct net_device *brdev)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2288fca7756c..0d177280aa84 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
531int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 531int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
532 const unsigned char *addr, u16 vid); 532 const unsigned char *addr, u16 vid);
533 533
534static inline bool br_hash_lock_held(struct net_bridge *br)
535{
536#ifdef CONFIG_LOCKDEP
537 return lockdep_is_held(&br->hash_lock);
538#else
539 return true;
540#endif
541}
542
543/* br_forward.c */ 534/* br_forward.c */
544enum br_pkt_type { 535enum br_pkt_type {
545 BR_PKT_UNICAST, 536 BR_PKT_UNICAST,
@@ -629,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
629void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 620void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
630 const struct sk_buff *skb, u8 type, u8 dir); 621 const struct sk_buff *skb, u8 type, u8 dir);
631int br_multicast_init_stats(struct net_bridge *br); 622int br_multicast_init_stats(struct net_bridge *br);
623void br_multicast_uninit_stats(struct net_bridge *br);
632void br_multicast_get_stats(const struct net_bridge *br, 624void br_multicast_get_stats(const struct net_bridge *br,
633 const struct net_bridge_port *p, 625 const struct net_bridge_port *p,
634 struct br_mcast_stats *dest); 626 struct br_mcast_stats *dest);
@@ -769,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
769 return 0; 761 return 0;
770} 762}
771 763
764static inline void br_multicast_uninit_stats(struct net_bridge *br)
765{
766}
767
772static inline int br_multicast_igmp_type(const struct sk_buff *skb) 768static inline int br_multicast_igmp_type(const struct sk_buff *skb)
773{ 769{
774 return 0; 770 return 0;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 38dcf1eb427d..f76bb3332613 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
7#include <linux/kthread.h> 7#include <linux/kthread.h>
8#include <linux/net.h> 8#include <linux/net.h>
9#include <linux/nsproxy.h> 9#include <linux/nsproxy.h>
10#include <linux/sched/mm.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/socket.h> 12#include <linux/socket.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
469{ 470{
470 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 471 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
471 struct socket *sock; 472 struct socket *sock;
473 unsigned int noio_flag;
472 int ret; 474 int ret;
473 475
474 BUG_ON(con->sock); 476 BUG_ON(con->sock);
477
478 /* sock_create_kern() allocates with GFP_KERNEL */
479 noio_flag = memalloc_noio_save();
475 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, 480 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
476 SOCK_STREAM, IPPROTO_TCP, &sock); 481 SOCK_STREAM, IPPROTO_TCP, &sock);
482 memalloc_noio_restore(noio_flag);
477 if (ret) 483 if (ret)
478 return ret; 484 return ret;
479 sock->sk->sk_allocation = GFP_NOFS; 485 sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ea633342ab0d..f4947e737f34 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
398 struct iov_iter *to, int len) 398 struct iov_iter *to, int len)
399{ 399{
400 int start = skb_headlen(skb); 400 int start = skb_headlen(skb);
401 int i, copy = start - offset; 401 int i, copy = start - offset, start_off = offset, n;
402 struct sk_buff *frag_iter; 402 struct sk_buff *frag_iter;
403 403
404 trace_skb_copy_datagram_iovec(skb, len); 404 trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
407 if (copy > 0) { 407 if (copy > 0) {
408 if (copy > len) 408 if (copy > len)
409 copy = len; 409 copy = len;
410 if (copy_to_iter(skb->data + offset, copy, to) != copy) 410 n = copy_to_iter(skb->data + offset, copy, to);
411 offset += n;
412 if (n != copy)
411 goto short_copy; 413 goto short_copy;
412 if ((len -= copy) == 0) 414 if ((len -= copy) == 0)
413 return 0; 415 return 0;
414 offset += copy;
415 } 416 }
416 417
417 /* Copy paged appendix. Hmm... why does this look so complicated? */ 418 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
425 if ((copy = end - offset) > 0) { 426 if ((copy = end - offset) > 0) {
426 if (copy > len) 427 if (copy > len)
427 copy = len; 428 copy = len;
428 if (copy_page_to_iter(skb_frag_page(frag), 429 n = copy_page_to_iter(skb_frag_page(frag),
429 frag->page_offset + offset - 430 frag->page_offset + offset -
430 start, copy, to) != copy) 431 start, copy, to);
432 offset += n;
433 if (n != copy)
431 goto short_copy; 434 goto short_copy;
432 if (!(len -= copy)) 435 if (!(len -= copy))
433 return 0; 436 return 0;
434 offset += copy;
435 } 437 }
436 start = end; 438 start = end;
437 } 439 }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
463 */ 465 */
464 466
465fault: 467fault:
468 iov_iter_revert(to, offset - start_off);
466 return -EFAULT; 469 return -EFAULT;
467 470
468short_copy: 471short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
613 __wsum *csump) 616 __wsum *csump)
614{ 617{
615 int start = skb_headlen(skb); 618 int start = skb_headlen(skb);
616 int i, copy = start - offset; 619 int i, copy = start - offset, start_off = offset;
617 struct sk_buff *frag_iter; 620 struct sk_buff *frag_iter;
618 int pos = 0; 621 int pos = 0;
619 int n; 622 int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
623 if (copy > len) 626 if (copy > len)
624 copy = len; 627 copy = len;
625 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); 628 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
629 offset += n;
626 if (n != copy) 630 if (n != copy)
627 goto fault; 631 goto fault;
628 if ((len -= copy) == 0) 632 if ((len -= copy) == 0)
629 return 0; 633 return 0;
630 offset += copy;
631 pos = copy; 634 pos = copy;
632 } 635 }
633 636
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
649 offset - start, copy, 652 offset - start, copy,
650 &csum2, to); 653 &csum2, to);
651 kunmap(page); 654 kunmap(page);
655 offset += n;
652 if (n != copy) 656 if (n != copy)
653 goto fault; 657 goto fault;
654 *csump = csum_block_add(*csump, csum2, pos); 658 *csump = csum_block_add(*csump, csum2, pos);
655 if (!(len -= copy)) 659 if (!(len -= copy))
656 return 0; 660 return 0;
657 offset += copy;
658 pos += copy; 661 pos += copy;
659 } 662 }
660 start = end; 663 start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
687 return 0; 690 return 0;
688 691
689fault: 692fault:
693 iov_iter_revert(to, offset - start_off);
690 return -EFAULT; 694 return -EFAULT;
691} 695}
692 696
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
771 } 775 }
772 return 0; 776 return 0;
773csum_error: 777csum_error:
778 iov_iter_revert(&msg->msg_iter, chunk);
774 return -EINVAL; 779 return -EINVAL;
775fault: 780fault:
776 return -EFAULT; 781 return -EFAULT;
diff --git a/net/core/dev.c b/net/core/dev.c
index 7869ae3837ca..533a6d6f6092 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
6757 6757
6758 return err; 6758 return err;
6759} 6759}
6760EXPORT_SYMBOL(dev_change_xdp_fd);
6761 6760
6762/** 6761/**
6763 * dev_new_index - allocate an ifindex 6762 * dev_new_index - allocate an ifindex
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..d98d4998213d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -390,7 +390,7 @@ mpls:
390 unsigned char ar_tip[4]; 390 unsigned char ar_tip[4];
391 } *arp_eth, _arp_eth; 391 } *arp_eth, _arp_eth;
392 const struct arphdr *arp; 392 const struct arphdr *arp;
393 struct arphdr *_arp; 393 struct arphdr _arp;
394 394
395 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, 395 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
396 hlen, &_arp); 396 hlen, &_arp);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7c12caa20c8..4526cbd7e28a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
860 if (skb) 860 if (skb)
861 skb = skb_clone(skb, GFP_ATOMIC); 861 skb = skb_clone(skb, GFP_ATOMIC);
862 write_unlock(&neigh->lock); 862 write_unlock(&neigh->lock);
863 neigh->ops->solicit(neigh, skb); 863 if (neigh->ops->solicit)
864 neigh->ops->solicit(neigh, skb);
864 atomic_inc(&neigh->probes); 865 atomic_inc(&neigh->probes);
865 kfree_skb(skb); 866 kfree_skb(skb);
866} 867}
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6ae56037bb13..029a61ac6cdd 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
71 return 0; 71 return 0;
72} 72}
73 73
74static void update_classid(struct cgroup_subsys_state *css, void *v) 74static void cgrp_attach(struct cgroup_taskset *tset)
75{ 75{
76 struct css_task_iter it; 76 struct cgroup_subsys_state *css;
77 struct task_struct *p; 77 struct task_struct *p;
78 78
79 css_task_iter_start(css, &it); 79 cgroup_taskset_for_each(p, css, tset) {
80 while ((p = css_task_iter_next(&it))) {
81 task_lock(p); 80 task_lock(p);
82 iterate_fd(p->files, 0, update_classid_sock, v); 81 iterate_fd(p->files, 0, update_classid_sock,
82 (void *)(unsigned long)css_cls_state(css)->classid);
83 task_unlock(p); 83 task_unlock(p);
84 } 84 }
85 css_task_iter_end(&it);
86}
87
88static void cgrp_attach(struct cgroup_taskset *tset)
89{
90 struct cgroup_subsys_state *css;
91
92 cgroup_taskset_first(tset, &css);
93 update_classid(css,
94 (void *)(unsigned long)css_cls_state(css)->classid);
95} 85}
96 86
97static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) 87static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
103 u64 value) 93 u64 value)
104{ 94{
105 struct cgroup_cls_state *cs = css_cls_state(css); 95 struct cgroup_cls_state *cs = css_cls_state(css);
96 struct css_task_iter it;
97 struct task_struct *p;
106 98
107 cgroup_sk_alloc_disable(); 99 cgroup_sk_alloc_disable();
108 100
109 cs->classid = (u32)value; 101 cs->classid = (u32)value;
110 102
111 update_classid(css, (void *)(unsigned long)cs->classid); 103 css_task_iter_start(css, &it);
104 while ((p = css_task_iter_next(&it))) {
105 task_lock(p);
106 iterate_fd(p->files, 0, update_classid_sock,
107 (void *)(unsigned long)cs->classid);
108 task_unlock(p);
109 }
110 css_task_iter_end(&it);
111
112 return 0; 112 return 0;
113} 113}
114 114
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9424673009c1..29be2466970c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
105 while ((skb = skb_dequeue(&npinfo->txq))) { 105 while ((skb = skb_dequeue(&npinfo->txq))) {
106 struct net_device *dev = skb->dev; 106 struct net_device *dev = skb->dev;
107 struct netdev_queue *txq; 107 struct netdev_queue *txq;
108 unsigned int q_index;
108 109
109 if (!netif_device_present(dev) || !netif_running(dev)) { 110 if (!netif_device_present(dev) || !netif_running(dev)) {
110 kfree_skb(skb); 111 kfree_skb(skb);
111 continue; 112 continue;
112 } 113 }
113 114
114 txq = skb_get_tx_queue(dev, skb);
115
116 local_irq_save(flags); 115 local_irq_save(flags);
116 /* check if skb->queue_mapping is still valid */
117 q_index = skb_get_queue_mapping(skb);
118 if (unlikely(q_index >= dev->real_num_tx_queues)) {
119 q_index = q_index % dev->real_num_tx_queues;
120 skb_set_queue_mapping(skb, q_index);
121 }
122 txq = netdev_get_tx_queue(dev, q_index);
117 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
118 if (netif_xmit_frozen_or_stopped(txq) || 124 if (netif_xmit_frozen_or_stopped(txq) ||
119 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { 125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..d28da7d363f1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -20,9 +20,11 @@
20#include <net/tcp.h> 20#include <net/tcp.h>
21 21
22static siphash_key_t net_secret __read_mostly; 22static siphash_key_t net_secret __read_mostly;
23static siphash_key_t ts_secret __read_mostly;
23 24
24static __always_inline void net_secret_init(void) 25static __always_inline void net_secret_init(void)
25{ 26{
27 net_get_random_once(&ts_secret, sizeof(ts_secret));
26 net_get_random_once(&net_secret, sizeof(net_secret)); 28 net_get_random_once(&net_secret, sizeof(net_secret));
27} 29}
28#endif 30#endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
45#endif 47#endif
46 48
47#if IS_ENABLED(CONFIG_IPV6) 49#if IS_ENABLED(CONFIG_IPV6)
50static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
51{
52 const struct {
53 struct in6_addr saddr;
54 struct in6_addr daddr;
55 } __aligned(SIPHASH_ALIGNMENT) combined = {
56 .saddr = *(struct in6_addr *)saddr,
57 .daddr = *(struct in6_addr *)daddr,
58 };
59
60 if (sysctl_tcp_timestamps != 1)
61 return 0;
62
63 return siphash(&combined, offsetofend(typeof(combined), daddr),
64 &ts_secret);
65}
66
48u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 67u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
49 __be16 sport, __be16 dport, u32 *tsoff) 68 __be16 sport, __be16 dport, u32 *tsoff)
50{ 69{
@@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
63 net_secret_init(); 82 net_secret_init();
64 hash = siphash(&combined, offsetofend(typeof(combined), dport), 83 hash = siphash(&combined, offsetofend(typeof(combined), dport),
65 &net_secret); 84 &net_secret);
66 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 85 *tsoff = secure_tcpv6_ts_off(saddr, daddr);
67 return seq_scale(hash); 86 return seq_scale(hash);
68} 87}
69EXPORT_SYMBOL(secure_tcpv6_sequence_number); 88EXPORT_SYMBOL(secure_tcpv6_sequence_number);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
88#endif 107#endif
89 108
90#ifdef CONFIG_INET 109#ifdef CONFIG_INET
110static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
111{
112 if (sysctl_tcp_timestamps != 1)
113 return 0;
114
115 return siphash_2u32((__force u32)saddr, (__force u32)daddr,
116 &ts_secret);
117}
91 118
92/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), 119/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
93 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, 120 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
103 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, 130 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
104 (__force u32)sport << 16 | (__force u32)dport, 131 (__force u32)sport << 16 | (__force u32)dport,
105 &net_secret); 132 &net_secret);
106 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 133 *tsoff = secure_tcp_ts_off(saddr, daddr);
107 return seq_scale(hash); 134 return seq_scale(hash);
108} 135}
109 136
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cd4ba8c6b609..f86bf69cfb8d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3082,22 +3082,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
3082 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3082 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3083 if (!(features & NETIF_F_GSO_PARTIAL)) { 3083 if (!(features & NETIF_F_GSO_PARTIAL)) {
3084 struct sk_buff *iter; 3084 struct sk_buff *iter;
3085 unsigned int frag_len;
3085 3086
3086 if (!list_skb || 3087 if (!list_skb ||
3087 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3088 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3088 goto normal; 3089 goto normal;
3089 3090
3090 /* Split the buffer at the frag_list pointer. 3091 /* If we get here then all the required
3091 * This is based on the assumption that all 3092 * GSO features except frag_list are supported.
3092 * buffers in the chain excluding the last 3093 * Try to split the SKB to multiple GSO SKBs
3093 * containing the same amount of data. 3094 * with no frag_list.
3095 * Currently we can do that only when the buffers don't
3096 * have a linear part and all the buffers except
3097 * the last are of the same length.
3094 */ 3098 */
3099 frag_len = list_skb->len;
3095 skb_walk_frags(head_skb, iter) { 3100 skb_walk_frags(head_skb, iter) {
3101 if (frag_len != iter->len && iter->next)
3102 goto normal;
3096 if (skb_headlen(iter)) 3103 if (skb_headlen(iter))
3097 goto normal; 3104 goto normal;
3098 3105
3099 len -= iter->len; 3106 len -= iter->len;
3100 } 3107 }
3108
3109 if (len != frag_len)
3110 goto normal;
3101 } 3111 }
3102 3112
3103 /* GSO partial only requires that we trim off any excess that 3113 /* GSO partial only requires that we trim off any excess that
@@ -3694,6 +3704,15 @@ static void sock_rmem_free(struct sk_buff *skb)
3694 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3704 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3695} 3705}
3696 3706
3707static void skb_set_err_queue(struct sk_buff *skb)
3708{
3709 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
3710 * So, it is safe to (mis)use it to mark skbs on the error queue.
3711 */
3712 skb->pkt_type = PACKET_OUTGOING;
3713 BUILD_BUG_ON(PACKET_OUTGOING == 0);
3714}
3715
3697/* 3716/*
3698 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3717 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
3699 */ 3718 */
@@ -3707,6 +3726,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3707 skb->sk = sk; 3726 skb->sk = sk;
3708 skb->destructor = sock_rmem_free; 3727 skb->destructor = sock_rmem_free;
3709 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3728 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3729 skb_set_err_queue(skb);
3710 3730
3711 /* before exiting rcu section, make sure dst is refcounted */ 3731 /* before exiting rcu section, make sure dst is refcounted */
3712 skb_dst_force(skb); 3732 skb_dst_force(skb);
@@ -3783,16 +3803,21 @@ EXPORT_SYMBOL(skb_clone_sk);
3783 3803
3784static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3804static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3785 struct sock *sk, 3805 struct sock *sk,
3786 int tstype) 3806 int tstype,
3807 bool opt_stats)
3787{ 3808{
3788 struct sock_exterr_skb *serr; 3809 struct sock_exterr_skb *serr;
3789 int err; 3810 int err;
3790 3811
3812 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
3813
3791 serr = SKB_EXT_ERR(skb); 3814 serr = SKB_EXT_ERR(skb);
3792 memset(serr, 0, sizeof(*serr)); 3815 memset(serr, 0, sizeof(*serr));
3793 serr->ee.ee_errno = ENOMSG; 3816 serr->ee.ee_errno = ENOMSG;
3794 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3817 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3795 serr->ee.ee_info = tstype; 3818 serr->ee.ee_info = tstype;
3819 serr->opt_stats = opt_stats;
3820 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
3796 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3821 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3797 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3822 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3798 if (sk->sk_protocol == IPPROTO_TCP && 3823 if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3833,7 +3858,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
3833 */ 3858 */
3834 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { 3859 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3835 *skb_hwtstamps(skb) = *hwtstamps; 3860 *skb_hwtstamps(skb) = *hwtstamps;
3836 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3861 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
3837 sock_put(sk); 3862 sock_put(sk);
3838 } 3863 }
3839} 3864}
@@ -3844,7 +3869,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3844 struct sock *sk, int tstype) 3869 struct sock *sk, int tstype)
3845{ 3870{
3846 struct sk_buff *skb; 3871 struct sk_buff *skb;
3847 bool tsonly; 3872 bool tsonly, opt_stats = false;
3848 3873
3849 if (!sk) 3874 if (!sk)
3850 return; 3875 return;
@@ -3857,9 +3882,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3857#ifdef CONFIG_INET 3882#ifdef CONFIG_INET
3858 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 3883 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
3859 sk->sk_protocol == IPPROTO_TCP && 3884 sk->sk_protocol == IPPROTO_TCP &&
3860 sk->sk_type == SOCK_STREAM) 3885 sk->sk_type == SOCK_STREAM) {
3861 skb = tcp_get_timestamping_opt_stats(sk); 3886 skb = tcp_get_timestamping_opt_stats(sk);
3862 else 3887 opt_stats = true;
3888 } else
3863#endif 3889#endif
3864 skb = alloc_skb(0, GFP_ATOMIC); 3890 skb = alloc_skb(0, GFP_ATOMIC);
3865 } else { 3891 } else {
@@ -3878,7 +3904,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3878 else 3904 else
3879 skb->tstamp = ktime_get_real(); 3905 skb->tstamp = ktime_get_real();
3880 3906
3881 __skb_complete_tx_timestamp(skb, sk, tstype); 3907 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
3882} 3908}
3883EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3909EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3884 3910
diff --git a/net/core/sock.c b/net/core/sock.c
index a96d5f7a5734..2c4f574168fb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
1442 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1442 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1443 __func__, atomic_read(&sk->sk_omem_alloc)); 1443 __func__, atomic_read(&sk->sk_omem_alloc));
1444 1444
1445 if (sk->sk_frag.page) {
1446 put_page(sk->sk_frag.page);
1447 sk->sk_frag.page = NULL;
1448 }
1449
1445 if (sk->sk_peer_cred) 1450 if (sk->sk_peer_cred)
1446 put_cred(sk->sk_peer_cred); 1451 put_cred(sk->sk_peer_cred);
1447 put_pid(sk->sk_peer_pid); 1452 put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1539 is_charged = sk_filter_charge(newsk, filter); 1544 is_charged = sk_filter_charge(newsk, filter);
1540 1545
1541 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1546 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1547 /* We need to make sure that we don't uncharge the new
1548 * socket if we couldn't charge it in the first place
1549 * as otherwise we uncharge the parent's filter.
1550 */
1551 if (!is_charged)
1552 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1542 sk_free_unlock_clone(newsk); 1553 sk_free_unlock_clone(newsk);
1543 newsk = NULL; 1554 newsk = NULL;
1544 goto out; 1555 goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)
2787 2798
2788 sk_refcnt_debug_release(sk); 2799 sk_refcnt_debug_release(sk);
2789 2800
2790 if (sk->sk_frag.page) {
2791 put_page(sk->sk_frag.page);
2792 sk->sk_frag.page = NULL;
2793 }
2794
2795 sock_put(sk); 2801 sock_put(sk);
2796} 2802}
2797EXPORT_SYMBOL(sk_common_release); 2803EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4ead336e14ea..7f9cc400eca0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
408 .data = &sysctl_net_busy_poll, 408 .data = &sysctl_net_busy_poll,
409 .maxlen = sizeof(unsigned int), 409 .maxlen = sizeof(unsigned int),
410 .mode = 0644, 410 .mode = 0644,
411 .proc_handler = proc_dointvec 411 .proc_handler = proc_dointvec_minmax,
412 .extra1 = &zero,
412 }, 413 },
413 { 414 {
414 .procname = "busy_read", 415 .procname = "busy_read",
415 .data = &sysctl_net_busy_read, 416 .data = &sysctl_net_busy_read,
416 .maxlen = sizeof(unsigned int), 417 .maxlen = sizeof(unsigned int),
417 .mode = 0644, 418 .mode = 0644,
418 .proc_handler = proc_dointvec 419 .proc_handler = proc_dointvec_minmax,
420 .extra1 = &zero,
419 }, 421 },
420#endif 422#endif
421#ifdef CONFIG_NET_SCHED 423#ifdef CONFIG_NET_SCHED
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 42bfd08109dd..8f2133ffc2ff 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
1083 1083
1084 net = sock_net(skb->sk); 1084 net = sock_net(skb->sk);
1085 nlh = nlmsg_hdr(skb); 1085 nlh = nlmsg_hdr(skb);
1086 if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || 1086 if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
1087 skb->len < nlh->nlmsg_len ||
1087 nlmsg_len(nlh) < sizeof(*frn)) 1088 nlmsg_len(nlh) < sizeof(*frn))
1088 return; 1089 return;
1089 1090
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index bbe7f72db9c1..b3cdeec85f1f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
198 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); 198 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
199 net = container_of(qp->q.net, struct net, ipv4.frags); 199 net = container_of(qp->q.net, struct net, ipv4.frags);
200 200
201 rcu_read_lock();
201 spin_lock(&qp->q.lock); 202 spin_lock(&qp->q.lock);
202 203
203 if (qp->q.flags & INET_FRAG_COMPLETE) 204 if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
207 __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); 208 __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
208 209
209 if (!inet_frag_evicting(&qp->q)) { 210 if (!inet_frag_evicting(&qp->q)) {
210 struct sk_buff *head = qp->q.fragments; 211 struct sk_buff *clone, *head = qp->q.fragments;
211 const struct iphdr *iph; 212 const struct iphdr *iph;
212 int err; 213 int err;
213 214
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
216 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) 217 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
217 goto out; 218 goto out;
218 219
219 rcu_read_lock();
220 head->dev = dev_get_by_index_rcu(net, qp->iif); 220 head->dev = dev_get_by_index_rcu(net, qp->iif);
221 if (!head->dev) 221 if (!head->dev)
222 goto out_rcu_unlock; 222 goto out;
223
223 224
224 /* skb has no dst, perform route lookup again */ 225 /* skb has no dst, perform route lookup again */
225 iph = ip_hdr(head); 226 iph = ip_hdr(head);
226 err = ip_route_input_noref(head, iph->daddr, iph->saddr, 227 err = ip_route_input_noref(head, iph->daddr, iph->saddr,
227 iph->tos, head->dev); 228 iph->tos, head->dev);
228 if (err) 229 if (err)
229 goto out_rcu_unlock; 230 goto out;
230 231
231 /* Only an end host needs to send an ICMP 232 /* Only an end host needs to send an ICMP
232 * "Fragment Reassembly Timeout" message, per RFC792. 233 * "Fragment Reassembly Timeout" message, per RFC792.
233 */ 234 */
234 if (frag_expire_skip_icmp(qp->user) && 235 if (frag_expire_skip_icmp(qp->user) &&
235 (skb_rtable(head)->rt_type != RTN_LOCAL)) 236 (skb_rtable(head)->rt_type != RTN_LOCAL))
236 goto out_rcu_unlock; 237 goto out;
238
239 clone = skb_clone(head, GFP_ATOMIC);
237 240
238 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 241 /* Send an ICMP "Fragment Reassembly Timeout" message. */
239 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 242 if (clone) {
240out_rcu_unlock: 243 spin_unlock(&qp->q.lock);
241 rcu_read_unlock(); 244 icmp_send(clone, ICMP_TIME_EXCEEDED,
245 ICMP_EXC_FRAGTIME, 0);
246 consume_skb(clone);
247 goto out_rcu_unlock;
248 }
242 } 249 }
243out: 250out:
244 spin_unlock(&qp->q.lock); 251 spin_unlock(&qp->q.lock);
252out_rcu_unlock:
253 rcu_read_unlock();
245 ipq_put(qp); 254 ipq_put(qp);
246} 255}
247 256
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ebd953bc5607..1d46d05efb0f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
488 return false; 488 return false;
489 489
490 /* Support IP_PKTINFO on tstamp packets if requested, to correlate 490 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
491 * timestamp with egress dev. Not possible for packets without dev 491 * timestamp with egress dev. Not possible for packets without iif
492 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). 492 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
493 */ 493 */
494 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || 494 info = PKTINFO_SKB_CB(skb);
495 (!skb->dev)) 495 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
496 !info->ipi_ifindex)
496 return false; 497 return false;
497 498
498 info = PKTINFO_SKB_CB(skb);
499 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; 499 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
500 info->ipi_ifindex = skb->dev->ifindex;
501 return true; 500 return true;
502} 501}
503 502
@@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
591 case MCAST_LEAVE_GROUP: 590 case MCAST_LEAVE_GROUP:
592 case MCAST_LEAVE_SOURCE_GROUP: 591 case MCAST_LEAVE_SOURCE_GROUP:
593 case MCAST_UNBLOCK_SOURCE: 592 case MCAST_UNBLOCK_SOURCE:
593 case IP_ROUTER_ALERT:
594 return true; 594 return true;
595 } 595 }
596 return false; 596 return false;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index fd9f34bbd740..dfb2ab2dd3c8 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
306 while ((d = next)) { 306 while ((d = next)) {
307 next = d->next; 307 next = d->next;
308 dev = d->dev; 308 dev = d->dev;
309 if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) { 309 if (d != ic_dev && !netdev_uses_dsa(dev)) {
310 pr_debug("IP-Config: Downing %s\n", dev->name); 310 pr_debug("IP-Config: Downing %s\n", dev->name);
311 dev_change_flags(dev, d->flags); 311 dev_change_flags(dev, d->flags);
312 } 312 }
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c0317c940bcd..b036e85e093b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
1278 struct net *net = sock_net(sk); 1278 struct net *net = sock_net(sk);
1279 struct mr_table *mrt; 1279 struct mr_table *mrt;
1280 1280
1281 rtnl_lock(); 1281 ASSERT_RTNL();
1282 ipmr_for_each_table(mrt, net) { 1282 ipmr_for_each_table(mrt, net) {
1283 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1283 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1284 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1284 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
@@ -1289,7 +1289,6 @@ static void mrtsock_destruct(struct sock *sk)
1289 mroute_clean_tables(mrt, false); 1289 mroute_clean_tables(mrt, false);
1290 } 1290 }
1291 } 1291 }
1292 rtnl_unlock();
1293} 1292}
1294 1293
1295/* Socket options and virtual interface manipulation. The whole 1294/* Socket options and virtual interface manipulation. The whole
@@ -1353,13 +1352,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1353 if (sk != rcu_access_pointer(mrt->mroute_sk)) { 1352 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1354 ret = -EACCES; 1353 ret = -EACCES;
1355 } else { 1354 } else {
1356 /* We need to unlock here because mrtsock_destruct takes
1357 * care of rtnl itself and we can't change that due to
1358 * the IP_ROUTER_ALERT setsockopt which runs without it.
1359 */
1360 rtnl_unlock();
1361 ret = ip_ra_control(sk, 0, NULL); 1355 ret = ip_ra_control(sk, 0, NULL);
1362 goto out; 1356 goto out_unlock;
1363 } 1357 }
1364 break; 1358 break;
1365 case MRT_ADD_VIF: 1359 case MRT_ADD_VIF:
@@ -1470,7 +1464,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1470 } 1464 }
1471out_unlock: 1465out_unlock:
1472 rtnl_unlock(); 1466 rtnl_unlock();
1473out:
1474 return ret; 1467 return ret;
1475} 1468}
1476 1469
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 52f26459efc3..9b8841316e7b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
461 461
462 clusterip_config_put(cipinfo->config); 462 clusterip_config_put(cipinfo->config);
463 463
464 nf_ct_netns_get(par->net, par->family); 464 nf_ct_netns_put(par->net, par->family);
465} 465}
466 466
467#ifdef CONFIG_COMPAT 467#ifdef CONFIG_COMPAT
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index bc1486f2c064..2e14ed11a35c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
165 if (skb->len < sizeof(struct iphdr) || 165 if (skb->len < sizeof(struct iphdr) ||
166 ip_hdrlen(skb) < sizeof(struct iphdr)) 166 ip_hdrlen(skb) < sizeof(struct iphdr))
167 return NF_ACCEPT; 167 return NF_ACCEPT;
168
169 if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
170 return NF_ACCEPT;
171
168 return nf_conntrack_in(state->net, PF_INET, state->hook, skb); 172 return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
169} 173}
170 174
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03d674b..6f5e8d01b876 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
255 /* maniptype == SRC for postrouting. */ 255 /* maniptype == SRC for postrouting. */
256 enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); 256 enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
257 257
258 /* We never see fragments: conntrack defrags on pre-routing
259 * and local-out, and nf_nat_out protects post-routing.
260 */
261 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
262
263 ct = nf_ct_get(skb, &ctinfo); 258 ct = nf_ct_get(skb, &ctinfo);
264 /* Can't track? It's not due to stress, or conntrack would 259 /* Can't track? It's not due to stress, or conntrack would
265 * have dropped it. Hence it's the user's responsibilty to 260 * have dropped it. Hence it's the user's responsibilty to
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..53e49f5011d3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
1260 .timeout = 180, 1260 .timeout = 180,
1261}; 1261};
1262 1262
1263static struct nf_conntrack_helper snmp_helper __read_mostly = {
1264 .me = THIS_MODULE,
1265 .help = help,
1266 .expect_policy = &snmp_exp_policy,
1267 .name = "snmp",
1268 .tuple.src.l3num = AF_INET,
1269 .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
1270 .tuple.dst.protonum = IPPROTO_UDP,
1271};
1272
1273static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { 1263static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1274 .me = THIS_MODULE, 1264 .me = THIS_MODULE,
1275 .help = help, 1265 .help = help,
@@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1288 1278
1289static int __init nf_nat_snmp_basic_init(void) 1279static int __init nf_nat_snmp_basic_init(void)
1290{ 1280{
1291 int ret = 0;
1292
1293 BUG_ON(nf_nat_snmp_hook != NULL); 1281 BUG_ON(nf_nat_snmp_hook != NULL);
1294 RCU_INIT_POINTER(nf_nat_snmp_hook, help); 1282 RCU_INIT_POINTER(nf_nat_snmp_hook, help);
1295 1283
1296 ret = nf_conntrack_helper_register(&snmp_trap_helper); 1284 return nf_conntrack_helper_register(&snmp_trap_helper);
1297 if (ret < 0) {
1298 nf_conntrack_helper_unregister(&snmp_helper);
1299 return ret;
1300 }
1301 return ret;
1302} 1285}
1303 1286
1304static void __exit nf_nat_snmp_basic_fini(void) 1287static void __exit nf_nat_snmp_basic_fini(void)
1305{ 1288{
1306 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL); 1289 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
1290 synchronize_rcu();
1307 nf_conntrack_helper_unregister(&snmp_trap_helper); 1291 nf_conntrack_helper_unregister(&snmp_trap_helper);
1308} 1292}
1309 1293
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index a0ea8aad1bf1..f18677277119 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
26 memset(&range, 0, sizeof(range)); 26 memset(&range, 0, sizeof(range));
27 range.flags = priv->flags; 27 range.flags = priv->flags;
28 if (priv->sreg_proto_min) { 28 if (priv->sreg_proto_min) {
29 range.min_proto.all = 29 range.min_proto.all = (__force __be16)nft_reg_load16(
30 *(__be16 *)&regs->data[priv->sreg_proto_min]; 30 &regs->data[priv->sreg_proto_min]);
31 range.max_proto.all = 31 range.max_proto.all = (__force __be16)nft_reg_load16(
32 *(__be16 *)&regs->data[priv->sreg_proto_max]; 32 &regs->data[priv->sreg_proto_max]);
33 } 33 }
34 regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), 34 regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
35 &range, nft_out(pkt)); 35 &range, nft_out(pkt));
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index 1650ed23c15d..5120be1d3118 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
26 26
27 memset(&mr, 0, sizeof(mr)); 27 memset(&mr, 0, sizeof(mr));
28 if (priv->sreg_proto_min) { 28 if (priv->sreg_proto_min) {
29 mr.range[0].min.all = 29 mr.range[0].min.all = (__force __be16)nft_reg_load16(
30 *(__be16 *)&regs->data[priv->sreg_proto_min]; 30 &regs->data[priv->sreg_proto_min]);
31 mr.range[0].max.all = 31 mr.range[0].max.all = (__force __be16)nft_reg_load16(
32 *(__be16 *)&regs->data[priv->sreg_proto_max]; 32 &regs->data[priv->sreg_proto_max]);
33 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 33 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
34 } 34 }
35 35
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2af6244b83e2..ccfbce13a633 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
156void ping_unhash(struct sock *sk) 156void ping_unhash(struct sock *sk)
157{ 157{
158 struct inet_sock *isk = inet_sk(sk); 158 struct inet_sock *isk = inet_sk(sk);
159
159 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); 160 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
161 write_lock_bh(&ping_table.lock);
160 if (sk_hashed(sk)) { 162 if (sk_hashed(sk)) {
161 write_lock_bh(&ping_table.lock);
162 hlist_nulls_del(&sk->sk_nulls_node); 163 hlist_nulls_del(&sk->sk_nulls_node);
163 sk_nulls_node_init(&sk->sk_nulls_node); 164 sk_nulls_node_init(&sk->sk_nulls_node);
164 sock_put(sk); 165 sock_put(sk);
165 isk->inet_num = 0; 166 isk->inet_num = 0;
166 isk->inet_sport = 0; 167 isk->inet_sport = 0;
167 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 168 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
168 write_unlock_bh(&ping_table.lock);
169 } 169 }
170 write_unlock_bh(&ping_table.lock);
170} 171}
171EXPORT_SYMBOL_GPL(ping_unhash); 172EXPORT_SYMBOL_GPL(ping_unhash);
172 173
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8119e1f66e03..9d943974de2b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
682 /* 682 /*
683 * Raw sockets may have direct kernel references. Kill them. 683 * Raw sockets may have direct kernel references. Kill them.
684 */ 684 */
685 rtnl_lock();
685 ip_ra_control(sk, 0, NULL); 686 ip_ra_control(sk, 0, NULL);
687 rtnl_unlock();
686 688
687 sk_common_release(sk); 689 sk_common_release(sk);
688} 690}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8471dd116771..acd69cfe2951 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2620 skb_reset_network_header(skb); 2620 skb_reset_network_header(skb);
2621 2621
2622 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ 2622 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2623 ip_hdr(skb)->protocol = IPPROTO_ICMP; 2623 ip_hdr(skb)->protocol = IPPROTO_UDP;
2624 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); 2624 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2625 2625
2626 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 2626 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf4555581282..40ba4249a586 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2322 tcp_init_send_head(sk); 2322 tcp_init_send_head(sk);
2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2324 __sk_dst_reset(sk); 2324 __sk_dst_reset(sk);
2325 tcp_saved_syn_free(tp);
2325 2326
2326 /* Clean up fastopen related fields */ 2327 /* Clean up fastopen related fields */
2327 tcp_free_fastopen_req(tp); 2328 tcp_free_fastopen_req(tp);
@@ -2770,7 +2771,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2770{ 2771{
2771 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 2772 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
2772 const struct inet_connection_sock *icsk = inet_csk(sk); 2773 const struct inet_connection_sock *icsk = inet_csk(sk);
2773 u32 now = tcp_time_stamp, intv; 2774 u32 now, intv;
2774 u64 rate64; 2775 u64 rate64;
2775 bool slow; 2776 bool slow;
2776 u32 rate; 2777 u32 rate;
@@ -2839,6 +2840,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2839 info->tcpi_retrans = tp->retrans_out; 2840 info->tcpi_retrans = tp->retrans_out;
2840 info->tcpi_fackets = tp->fackets_out; 2841 info->tcpi_fackets = tp->fackets_out;
2841 2842
2843 now = tcp_time_stamp;
2842 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2844 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2843 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2845 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2844 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2846 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393cc0fd3..659d1baefb2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
126#define REXMIT_LOST 1 /* retransmit packets marked lost */ 126#define REXMIT_LOST 1 /* retransmit packets marked lost */
127#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ 127#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
128 128
129static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) 129static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
130 unsigned int len)
130{ 131{
131 static bool __once __read_mostly; 132 static bool __once __read_mostly;
132 133
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
137 138
138 rcu_read_lock(); 139 rcu_read_lock();
139 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); 140 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
140 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", 141 if (!dev || len >= dev->mtu)
141 dev ? dev->name : "Unknown driver"); 142 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
143 dev ? dev->name : "Unknown driver");
142 rcu_read_unlock(); 144 rcu_read_unlock();
143 } 145 }
144} 146}
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
161 if (len >= icsk->icsk_ack.rcv_mss) { 163 if (len >= icsk->icsk_ack.rcv_mss) {
162 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, 164 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
163 tcp_sk(sk)->advmss); 165 tcp_sk(sk)->advmss);
164 if (unlikely(icsk->icsk_ack.rcv_mss != len)) 166 /* Account for possibly-removed options */
165 tcp_gro_dev_warn(sk, skb); 167 if (unlikely(len > icsk->icsk_ack.rcv_mss +
168 MAX_TCP_OPTION_SPACE))
169 tcp_gro_dev_warn(sk, skb, len);
166 } else { 170 } else {
167 /* Otherwise, we make more careful check taking into account, 171 /* Otherwise, we make more careful check taking into account,
168 * that SACKs block is variable. 172 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
874 const int ts) 878 const int ts)
875{ 879{
876 struct tcp_sock *tp = tcp_sk(sk); 880 struct tcp_sock *tp = tcp_sk(sk);
877 if (metric > tp->reordering) { 881 int mib_idx;
878 int mib_idx;
879 882
883 if (metric > tp->reordering) {
880 tp->reordering = min(sysctl_tcp_max_reordering, metric); 884 tp->reordering = min(sysctl_tcp_max_reordering, metric);
881 885
882 /* This exciting event is worth to be remembered. 8) */
883 if (ts)
884 mib_idx = LINUX_MIB_TCPTSREORDER;
885 else if (tcp_is_reno(tp))
886 mib_idx = LINUX_MIB_TCPRENOREORDER;
887 else if (tcp_is_fack(tp))
888 mib_idx = LINUX_MIB_TCPFACKREORDER;
889 else
890 mib_idx = LINUX_MIB_TCPSACKREORDER;
891
892 NET_INC_STATS(sock_net(sk), mib_idx);
893#if FASTRETRANS_DEBUG > 1 886#if FASTRETRANS_DEBUG > 1
894 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", 887 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
895 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 888 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
902 } 895 }
903 896
904 tp->rack.reord = 1; 897 tp->rack.reord = 1;
898
899 /* This exciting event is worth to be remembered. 8) */
900 if (ts)
901 mib_idx = LINUX_MIB_TCPTSREORDER;
902 else if (tcp_is_reno(tp))
903 mib_idx = LINUX_MIB_TCPRENOREORDER;
904 else if (tcp_is_fack(tp))
905 mib_idx = LINUX_MIB_TCPFACKREORDER;
906 else
907 mib_idx = LINUX_MIB_TCPSACKREORDER;
908
909 NET_INC_STATS(sock_net(sk), mib_idx);
905} 910}
906 911
907/* This must be called before lost_out is incremented */ 912/* This must be called before lost_out is incremented */
@@ -1930,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
1930 struct tcp_sock *tp = tcp_sk(sk); 1935 struct tcp_sock *tp = tcp_sk(sk);
1931 struct net *net = sock_net(sk); 1936 struct net *net = sock_net(sk);
1932 struct sk_buff *skb; 1937 struct sk_buff *skb;
1938 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1933 bool is_reneg; /* is receiver reneging on SACKs? */ 1939 bool is_reneg; /* is receiver reneging on SACKs? */
1934 bool mark_lost; 1940 bool mark_lost;
1935 1941
@@ -1989,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
1989 tp->high_seq = tp->snd_nxt; 1995 tp->high_seq = tp->snd_nxt;
1990 tcp_ecn_queue_cwr(tp); 1996 tcp_ecn_queue_cwr(tp);
1991 1997
1992 /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO 1998 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1993 * if a previous recovery is underway, otherwise it may incorrectly 1999 * loss recovery is underway except recurring timeout(s) on
1994 * call a timeout spurious if some previously retransmitted packets 2000 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
1995 * are s/acked (sec 3.2). We do not apply that retriction since 2001 *
1996 * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS 2002 * In theory F-RTO can be used repeatedly during loss recovery.
1997 * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO 2003 * In practice this interacts badly with broken middle-boxes that
1998 * on PTMU discovery to avoid sending new data. 2004 * falsely raise the receive window, which results in repeated
2005 * timeouts and stop-and-go behavior.
1999 */ 2006 */
2000 tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; 2007 tp->frto = sysctl_tcp_frto &&
2008 (new_recovery || icsk->icsk_retransmits) &&
2009 !inet_csk(sk)->icsk_mtup.probe_size;
2001} 2010}
2002 2011
2003/* If ACK arrived pointing to a remembered SACK, it means that our 2012/* If ACK arrived pointing to a remembered SACK, it means that our
@@ -5541,6 +5550,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5541 struct inet_connection_sock *icsk = inet_csk(sk); 5550 struct inet_connection_sock *icsk = inet_csk(sk);
5542 5551
5543 tcp_set_state(sk, TCP_ESTABLISHED); 5552 tcp_set_state(sk, TCP_ESTABLISHED);
5553 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5544 5554
5545 if (skb) { 5555 if (skb) {
5546 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5556 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5769,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5759 * to stand against the temptation 8) --ANK 5769 * to stand against the temptation 8) --ANK
5760 */ 5770 */
5761 inet_csk_schedule_ack(sk); 5771 inet_csk_schedule_ack(sk);
5762 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5763 tcp_enter_quickack_mode(sk); 5772 tcp_enter_quickack_mode(sk);
5764 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5773 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5765 TCP_DELACK_MAX, TCP_RTO_MAX); 5774 TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e16243cdb58..65c0f3d13eca 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
460 newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 460 newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
461 minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); 461 minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
462 newicsk->icsk_rto = TCP_TIMEOUT_INIT; 462 newicsk->icsk_rto = TCP_TIMEOUT_INIT;
463 newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
463 464
464 newtp->packets_out = 0; 465 newtp->packets_out = 0;
465 newtp->retrans_out = 0; 466 newtp->retrans_out = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 22548b5f05cb..c3c082ed3879 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2999{ 2999{
3000 struct sk_buff *skb; 3000 struct sk_buff *skb;
3001 3001
3002 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3003
3002 /* NOTE: No TCP options attached and we never retransmit this. */ 3004 /* NOTE: No TCP options attached and we never retransmit this. */
3003 skb = alloc_skb(MAX_TCP_HEADER, priority); 3005 skb = alloc_skb(MAX_TCP_HEADER, priority);
3004 if (!skb) { 3006 if (!skb) {
@@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3014 /* Send it off. */ 3016 /* Send it off. */
3015 if (tcp_transmit_skb(sk, skb, 0, priority)) 3017 if (tcp_transmit_skb(sk, skb, 0, priority))
3016 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3018 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3017
3018 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3019} 3019}
3020 3020
3021/* Send a crossed SYN-ACK during socket establishment. 3021/* Send a crossed SYN-ACK during socket establishment.
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 4ecb38ae8504..d8acbd9f477a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
12 /* Account for retransmits that are lost again */ 12 /* Account for retransmits that are lost again */
13 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 13 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
14 tp->retrans_out -= tcp_skb_pcount(skb); 14 tp->retrans_out -= tcp_skb_pcount(skb);
15 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 15 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
16 tcp_skb_pcount(skb));
16 } 17 }
17} 18}
18 19
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 363172527e43..80ce478c4851 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3626,14 +3626,19 @@ restart:
3626 INIT_LIST_HEAD(&del_list); 3626 INIT_LIST_HEAD(&del_list);
3627 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3627 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3628 struct rt6_info *rt = NULL; 3628 struct rt6_info *rt = NULL;
3629 bool keep;
3629 3630
3630 addrconf_del_dad_work(ifa); 3631 addrconf_del_dad_work(ifa);
3631 3632
3633 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3634 !addr_is_local(&ifa->addr);
3635 if (!keep)
3636 list_move(&ifa->if_list, &del_list);
3637
3632 write_unlock_bh(&idev->lock); 3638 write_unlock_bh(&idev->lock);
3633 spin_lock_bh(&ifa->lock); 3639 spin_lock_bh(&ifa->lock);
3634 3640
3635 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && 3641 if (keep) {
3636 !addr_is_local(&ifa->addr)) {
3637 /* set state to skip the notifier below */ 3642 /* set state to skip the notifier below */
3638 state = INET6_IFADDR_STATE_DEAD; 3643 state = INET6_IFADDR_STATE_DEAD;
3639 ifa->state = 0; 3644 ifa->state = 0;
@@ -3645,8 +3650,6 @@ restart:
3645 } else { 3650 } else {
3646 state = ifa->state; 3651 state = ifa->state;
3647 ifa->state = INET6_IFADDR_STATE_DEAD; 3652 ifa->state = INET6_IFADDR_STATE_DEAD;
3648
3649 list_move(&ifa->if_list, &del_list);
3650 } 3653 }
3651 3654
3652 spin_unlock_bh(&ifa->lock); 3655 spin_unlock_bh(&ifa->lock);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index eec27f87efac..e011122ebd43 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
405 * At one point, excluding local errors was a quick test to identify icmp/icmp6 405 * At one point, excluding local errors was a quick test to identify icmp/icmp6
406 * errors. This is no longer true, but the test remained, so the v6 stack, 406 * errors. This is no longer true, but the test remained, so the v6 stack,
407 * unlike v4, also honors cmsg requests on all wifi and timestamp errors. 407 * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
408 *
409 * Timestamp code paths do not initialize the fields expected by cmsg:
410 * the PKTINFO fields in skb->cb[]. Fill those in here.
411 */ 408 */
412static bool ip6_datagram_support_cmsg(struct sk_buff *skb, 409static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
413 struct sock_exterr_skb *serr) 410 struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
419 if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) 416 if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
420 return false; 417 return false;
421 418
422 if (!skb->dev) 419 if (!IP6CB(skb)->iif)
423 return false; 420 return false;
424 421
425 if (skb->protocol == htons(ETH_P_IPV6))
426 IP6CB(skb)->iif = skb->dev->ifindex;
427 else
428 PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
429
430 return true; 422 return true;
431} 423}
432 424
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 275cac628a95..25192a3b0cd7 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -388,7 +388,6 @@ looped_back:
388 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 388 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
389 ((&hdr->segments_left) - 389 ((&hdr->segments_left) -
390 skb_network_header(skb))); 390 skb_network_header(skb)));
391 kfree_skb(skb);
392 return -1; 391 return -1;
393 } 392 }
394 393
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index aacfb4bce153..c45b12b4431c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -122,11 +122,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
122 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); 122 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
123 /* 123 /*
124 * RFC4291 2.5.3 124 * RFC4291 2.5.3
125 * The loopback address must not be used as the source address in IPv6
126 * packets that are sent outside of a single node. [..]
125 * A packet received on an interface with a destination address 127 * A packet received on an interface with a destination address
126 * of loopback must be dropped. 128 * of loopback must be dropped.
127 */ 129 */
128 if (!(dev->flags & IFF_LOOPBACK) && 130 if ((ipv6_addr_loopback(&hdr->saddr) ||
129 ipv6_addr_loopback(&hdr->daddr)) 131 ipv6_addr_loopback(&hdr->daddr)) &&
132 !(dev->flags & IFF_LOOPBACK))
130 goto err; 133 goto err;
131 134
132 /* RFC4291 Errata ID: 3480 135 /* RFC4291 Errata ID: 3480
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6ba6c900ebcf..bf34d0950752 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@ failure:
774 * Delete a VIF entry 774 * Delete a VIF entry
775 */ 775 */
776 776
777static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) 777static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
778 struct list_head *head)
778{ 779{
779 struct mif_device *v; 780 struct mif_device *v;
780 struct net_device *dev; 781 struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
820 dev->ifindex, &in6_dev->cnf); 821 dev->ifindex, &in6_dev->cnf);
821 } 822 }
822 823
823 if (v->flags & MIFF_REGISTER) 824 if ((v->flags & MIFF_REGISTER) && !notify)
824 unregister_netdevice_queue(dev, head); 825 unregister_netdevice_queue(dev, head);
825 826
826 dev_put(dev); 827 dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
1331 struct mr6_table *mrt; 1332 struct mr6_table *mrt;
1332 struct mif_device *v; 1333 struct mif_device *v;
1333 int ct; 1334 int ct;
1334 LIST_HEAD(list);
1335 1335
1336 if (event != NETDEV_UNREGISTER) 1336 if (event != NETDEV_UNREGISTER)
1337 return NOTIFY_DONE; 1337 return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
1340 v = &mrt->vif6_table[0]; 1340 v = &mrt->vif6_table[0];
1341 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1341 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1342 if (v->dev == dev) 1342 if (v->dev == dev)
1343 mif6_delete(mrt, ct, &list); 1343 mif6_delete(mrt, ct, 1, NULL);
1344 } 1344 }
1345 } 1345 }
1346 unregister_netdevice_many(&list);
1347 1346
1348 return NOTIFY_DONE; 1347 return NOTIFY_DONE;
1349} 1348}
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1552 for (i = 0; i < mrt->maxvif; i++) { 1551 for (i = 0; i < mrt->maxvif; i++) {
1553 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) 1552 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1554 continue; 1553 continue;
1555 mif6_delete(mrt, i, &list); 1554 mif6_delete(mrt, i, 0, &list);
1556 } 1555 }
1557 unregister_netdevice_many(&list); 1556 unregister_netdevice_many(&list);
1558 1557
@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1707 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1706 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1708 return -EFAULT; 1707 return -EFAULT;
1709 rtnl_lock(); 1708 rtnl_lock();
1710 ret = mif6_delete(mrt, mifi, NULL); 1709 ret = mif6_delete(mrt, mifi, 0, NULL);
1711 rtnl_unlock(); 1710 rtnl_unlock();
1712 return ret; 1711 return ret;
1713 1712
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 6c5b5b1830a7..4146536e9c15 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
27 memset(&range, 0, sizeof(range)); 27 memset(&range, 0, sizeof(range));
28 range.flags = priv->flags; 28 range.flags = priv->flags;
29 if (priv->sreg_proto_min) { 29 if (priv->sreg_proto_min) {
30 range.min_proto.all = 30 range.min_proto.all = (__force __be16)nft_reg_load16(
31 *(__be16 *)&regs->data[priv->sreg_proto_min]; 31 &regs->data[priv->sreg_proto_min]);
32 range.max_proto.all = 32 range.max_proto.all = (__force __be16)nft_reg_load16(
33 *(__be16 *)&regs->data[priv->sreg_proto_max]; 33 &regs->data[priv->sreg_proto_max]);
34 } 34 }
35 regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, 35 regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
36 nft_out(pkt)); 36 nft_out(pkt));
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index f5ac080fc084..a27e424f690d 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
26 26
27 memset(&range, 0, sizeof(range)); 27 memset(&range, 0, sizeof(range));
28 if (priv->sreg_proto_min) { 28 if (priv->sreg_proto_min) {
29 range.min_proto.all = 29 range.min_proto.all = (__force __be16)nft_reg_load16(
30 *(__be16 *)&regs->data[priv->sreg_proto_min], 30 &regs->data[priv->sreg_proto_min]);
31 range.max_proto.all = 31 range.max_proto.all = (__force __be16)nft_reg_load16(
32 *(__be16 *)&regs->data[priv->sreg_proto_max], 32 &regs->data[priv->sreg_proto_max]);
33 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 33 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
34 } 34 }
35 35
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 35c58b669ebd..fb174b590fd3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1854 int addr_type; 1854 int addr_type;
1855 int err = -EINVAL; 1855 int err = -EINVAL;
1856 1856
1857 /* RTF_PCPU is an internal flag; can not be set by userspace */
1858 if (cfg->fc_flags & RTF_PCPU)
1859 goto out;
1860
1857 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) 1861 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1858 goto out; 1862 goto out;
1859#ifndef CONFIG_IPV6_SUBTREES 1863#ifndef CONFIG_IPV6_SUBTREES
@@ -3423,6 +3427,8 @@ static int rt6_fill_node(struct net *net,
3423 } 3427 }
3424 else if (rt->rt6i_flags & RTF_LOCAL) 3428 else if (rt->rt6i_flags & RTF_LOCAL)
3425 rtm->rtm_type = RTN_LOCAL; 3429 rtm->rtm_type = RTN_LOCAL;
3430 else if (rt->rt6i_flags & RTF_ANYCAST)
3431 rtm->rtm_type = RTN_ANYCAST;
3426 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) 3432 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3427 rtm->rtm_type = RTN_LOCAL; 3433 rtm->rtm_type = RTN_LOCAL;
3428 else 3434 else
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index a855eb325b03..5f44ffed2576 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
53 struct sr6_tlv *tlv; 53 struct sr6_tlv *tlv;
54 unsigned int tlv_len; 54 unsigned int tlv_len;
55 55
56 if (trailing < sizeof(*tlv))
57 return false;
58
56 tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset); 59 tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
57 tlv_len = sizeof(*tlv) + tlv->len; 60 tlv_len = sizeof(*tlv) + tlv->len;
58 61
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc6..e28082f0a307 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1035 ipc6.hlimit = -1; 1035 ipc6.hlimit = -1;
1036 ipc6.tclass = -1; 1036 ipc6.tclass = -1;
1037 ipc6.dontfrag = -1; 1037 ipc6.dontfrag = -1;
1038 sockc.tsflags = sk->sk_tsflags;
1038 1039
1039 /* destination address check */ 1040 /* destination address check */
1040 if (sin6) { 1041 if (sin6) {
@@ -1159,7 +1160,6 @@ do_udp_sendmsg:
1159 1160
1160 fl6.flowi6_mark = sk->sk_mark; 1161 fl6.flowi6_mark = sk->sk_mark;
1161 fl6.flowi6_uid = sk->sk_uid; 1162 fl6.flowi6_uid = sk->sk_uid;
1162 sockc.tsflags = sk->sk_tsflags;
1163 1163
1164 if (msg->msg_controllen) { 1164 if (msg->msg_controllen) {
1165 opt = &opt_space; 1165 opt = &opt_space;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 309062f3debe..31762f76cdb5 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1687 struct kcm_attach info; 1687 struct kcm_attach info;
1688 1688
1689 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1689 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1690 err = -EFAULT; 1690 return -EFAULT;
1691 1691
1692 err = kcm_attach_ioctl(sock, &info); 1692 err = kcm_attach_ioctl(sock, &info);
1693 1693
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1697 struct kcm_unattach info; 1697 struct kcm_unattach info;
1698 1698
1699 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1699 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1700 err = -EFAULT; 1700 return -EFAULT;
1701 1701
1702 err = kcm_unattach_ioctl(sock, &info); 1702 err = kcm_unattach_ioctl(sock, &info);
1703 1703
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1708 struct socket *newsock = NULL; 1708 struct socket *newsock = NULL;
1709 1709
1710 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1710 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1711 err = -EFAULT; 1711 return -EFAULT;
1712 1712
1713 err = kcm_clone(sock, &info, &newsock); 1713 err = kcm_clone(sock, &info, &newsock);
1714 1714
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c6252ed42c1d..be8cecc65002 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,8 +63,13 @@ struct pfkey_sock {
63 } u; 63 } u;
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 } dump; 65 } dump;
66 struct mutex dump_lock;
66}; 67};
67 68
69static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
70 xfrm_address_t *saddr, xfrm_address_t *daddr,
71 u16 *family);
72
68static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 73static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
69{ 74{
70 return (struct pfkey_sock *)sk; 75 return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
139{ 144{
140 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 145 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
141 struct sock *sk; 146 struct sock *sk;
147 struct pfkey_sock *pfk;
142 int err; 148 int err;
143 149
144 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 150 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
153 if (sk == NULL) 159 if (sk == NULL)
154 goto out; 160 goto out;
155 161
162 pfk = pfkey_sk(sk);
163 mutex_init(&pfk->dump_lock);
164
156 sock->ops = &pfkey_ops; 165 sock->ops = &pfkey_ops;
157 sock_init_data(sock, sk); 166 sock_init_data(sock, sk);
158 167
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
281 struct sadb_msg *hdr; 290 struct sadb_msg *hdr;
282 int rc; 291 int rc;
283 292
293 mutex_lock(&pfk->dump_lock);
294 if (!pfk->dump.dump) {
295 rc = 0;
296 goto out;
297 }
298
284 rc = pfk->dump.dump(pfk); 299 rc = pfk->dump.dump(pfk);
285 if (rc == -ENOBUFS) 300 if (rc == -ENOBUFS) {
286 return 0; 301 rc = 0;
302 goto out;
303 }
287 304
288 if (pfk->dump.skb) { 305 if (pfk->dump.skb) {
289 if (!pfkey_can_dump(&pfk->sk)) 306 if (!pfkey_can_dump(&pfk->sk)) {
290 return 0; 307 rc = 0;
308 goto out;
309 }
291 310
292 hdr = (struct sadb_msg *) pfk->dump.skb->data; 311 hdr = (struct sadb_msg *) pfk->dump.skb->data;
293 hdr->sadb_msg_seq = 0; 312 hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
298 } 317 }
299 318
300 pfkey_terminate_dump(pfk); 319 pfkey_terminate_dump(pfk);
320
321out:
322 mutex_unlock(&pfk->dump_lock);
301 return rc; 323 return rc;
302} 324}
303 325
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
1793 struct xfrm_address_filter *filter = NULL; 1815 struct xfrm_address_filter *filter = NULL;
1794 struct pfkey_sock *pfk = pfkey_sk(sk); 1816 struct pfkey_sock *pfk = pfkey_sk(sk);
1795 1817
1796 if (pfk->dump.dump != NULL) 1818 mutex_lock(&pfk->dump_lock);
1819 if (pfk->dump.dump != NULL) {
1820 mutex_unlock(&pfk->dump_lock);
1797 return -EBUSY; 1821 return -EBUSY;
1822 }
1798 1823
1799 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1824 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1800 if (proto == 0) 1825 if (proto == 0) {
1826 mutex_unlock(&pfk->dump_lock);
1801 return -EINVAL; 1827 return -EINVAL;
1828 }
1802 1829
1803 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { 1830 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
1804 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; 1831 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
1805 1832
1806 filter = kmalloc(sizeof(*filter), GFP_KERNEL); 1833 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
1807 if (filter == NULL) 1834 if (filter == NULL) {
1835 mutex_unlock(&pfk->dump_lock);
1808 return -ENOMEM; 1836 return -ENOMEM;
1837 }
1809 1838
1810 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, 1839 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
1811 sizeof(xfrm_address_t)); 1840 sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
1821 pfk->dump.dump = pfkey_dump_sa; 1850 pfk->dump.dump = pfkey_dump_sa;
1822 pfk->dump.done = pfkey_dump_sa_done; 1851 pfk->dump.done = pfkey_dump_sa_done;
1823 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); 1852 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
1853 mutex_unlock(&pfk->dump_lock);
1824 1854
1825 return pfkey_do_dump(pfk); 1855 return pfkey_do_dump(pfk);
1826} 1856}
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1913 1943
1914 /* addresses present only in tunnel mode */ 1944 /* addresses present only in tunnel mode */
1915 if (t->mode == XFRM_MODE_TUNNEL) { 1945 if (t->mode == XFRM_MODE_TUNNEL) {
1916 u8 *sa = (u8 *) (rq + 1); 1946 int err;
1917 int family, socklen;
1918 1947
1919 family = pfkey_sockaddr_extract((struct sockaddr *)sa, 1948 err = parse_sockaddr_pair(
1920 &t->saddr); 1949 (struct sockaddr *)(rq + 1),
1921 if (!family) 1950 rq->sadb_x_ipsecrequest_len - sizeof(*rq),
1922 return -EINVAL; 1951 &t->saddr, &t->id.daddr, &t->encap_family);
1923 1952 if (err)
1924 socklen = pfkey_sockaddr_len(family); 1953 return err;
1925 if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
1926 &t->id.daddr) != family)
1927 return -EINVAL;
1928 t->encap_family = family;
1929 } else 1954 } else
1930 t->encap_family = xp->family; 1955 t->encap_family = xp->family;
1931 1956
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
1945 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) 1970 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
1946 return -EINVAL; 1971 return -EINVAL;
1947 1972
1948 while (len >= sizeof(struct sadb_x_ipsecrequest)) { 1973 while (len >= sizeof(*rq)) {
1974 if (len < rq->sadb_x_ipsecrequest_len ||
1975 rq->sadb_x_ipsecrequest_len < sizeof(*rq))
1976 return -EINVAL;
1977
1949 if ((err = parse_ipsecrequest(xp, rq)) < 0) 1978 if ((err = parse_ipsecrequest(xp, rq)) < 0)
1950 return err; 1979 return err;
1951 len -= rq->sadb_x_ipsecrequest_len; 1980 len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
2408 return err; 2437 return err;
2409} 2438}
2410 2439
2411#ifdef CONFIG_NET_KEY_MIGRATE
2412static int pfkey_sockaddr_pair_size(sa_family_t family) 2440static int pfkey_sockaddr_pair_size(sa_family_t family)
2413{ 2441{
2414 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); 2442 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
2420{ 2448{
2421 int af, socklen; 2449 int af, socklen;
2422 2450
2423 if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2451 if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
2424 return -EINVAL; 2452 return -EINVAL;
2425 2453
2426 af = pfkey_sockaddr_extract(sa, saddr); 2454 af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
2436 return 0; 2464 return 0;
2437} 2465}
2438 2466
2467#ifdef CONFIG_NET_KEY_MIGRATE
2439static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, 2468static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2440 struct xfrm_migrate *m) 2469 struct xfrm_migrate *m)
2441{ 2470{
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2443 struct sadb_x_ipsecrequest *rq2; 2472 struct sadb_x_ipsecrequest *rq2;
2444 int mode; 2473 int mode;
2445 2474
2446 if (len <= sizeof(struct sadb_x_ipsecrequest) || 2475 if (len < sizeof(*rq1) ||
2447 len < rq1->sadb_x_ipsecrequest_len) 2476 len < rq1->sadb_x_ipsecrequest_len ||
2477 rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
2448 return -EINVAL; 2478 return -EINVAL;
2449 2479
2450 /* old endoints */ 2480 /* old endoints */
2451 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1), 2481 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
2452 rq1->sadb_x_ipsecrequest_len, 2482 rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
2453 &m->old_saddr, &m->old_daddr, 2483 &m->old_saddr, &m->old_daddr,
2454 &m->old_family); 2484 &m->old_family);
2455 if (err) 2485 if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2458 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len); 2488 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
2459 len -= rq1->sadb_x_ipsecrequest_len; 2489 len -= rq1->sadb_x_ipsecrequest_len;
2460 2490
2461 if (len <= sizeof(struct sadb_x_ipsecrequest) || 2491 if (len <= sizeof(*rq2) ||
2462 len < rq2->sadb_x_ipsecrequest_len) 2492 len < rq2->sadb_x_ipsecrequest_len ||
2493 rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
2463 return -EINVAL; 2494 return -EINVAL;
2464 2495
2465 /* new endpoints */ 2496 /* new endpoints */
2466 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1), 2497 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
2467 rq2->sadb_x_ipsecrequest_len, 2498 rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
2468 &m->new_saddr, &m->new_daddr, 2499 &m->new_saddr, &m->new_daddr,
2469 &m->new_family); 2500 &m->new_family);
2470 if (err) 2501 if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
2679{ 2710{
2680 struct pfkey_sock *pfk = pfkey_sk(sk); 2711 struct pfkey_sock *pfk = pfkey_sk(sk);
2681 2712
2682 if (pfk->dump.dump != NULL) 2713 mutex_lock(&pfk->dump_lock);
2714 if (pfk->dump.dump != NULL) {
2715 mutex_unlock(&pfk->dump_lock);
2683 return -EBUSY; 2716 return -EBUSY;
2717 }
2684 2718
2685 pfk->dump.msg_version = hdr->sadb_msg_version; 2719 pfk->dump.msg_version = hdr->sadb_msg_version;
2686 pfk->dump.msg_portid = hdr->sadb_msg_pid; 2720 pfk->dump.msg_portid = hdr->sadb_msg_pid;
2687 pfk->dump.dump = pfkey_dump_sp; 2721 pfk->dump.dump = pfkey_dump_sp;
2688 pfk->dump.done = pfkey_dump_sp_done; 2722 pfk->dump.done = pfkey_dump_sp_done;
2689 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); 2723 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
2724 mutex_unlock(&pfk->dump_lock);
2690 2725
2691 return pfkey_do_dump(pfk); 2726 return pfkey_do_dump(pfk);
2692} 2727}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8adab6335ced..e37d9554da7b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
278} 278}
279EXPORT_SYMBOL_GPL(l2tp_session_find); 279EXPORT_SYMBOL_GPL(l2tp_session_find);
280 280
281struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) 281/* Like l2tp_session_find() but takes a reference on the returned session.
282 * Optionally calls session->ref() too if do_ref is true.
283 */
284struct l2tp_session *l2tp_session_get(struct net *net,
285 struct l2tp_tunnel *tunnel,
286 u32 session_id, bool do_ref)
287{
288 struct hlist_head *session_list;
289 struct l2tp_session *session;
290
291 if (!tunnel) {
292 struct l2tp_net *pn = l2tp_pernet(net);
293
294 session_list = l2tp_session_id_hash_2(pn, session_id);
295
296 rcu_read_lock_bh();
297 hlist_for_each_entry_rcu(session, session_list, global_hlist) {
298 if (session->session_id == session_id) {
299 l2tp_session_inc_refcount(session);
300 if (do_ref && session->ref)
301 session->ref(session);
302 rcu_read_unlock_bh();
303
304 return session;
305 }
306 }
307 rcu_read_unlock_bh();
308
309 return NULL;
310 }
311
312 session_list = l2tp_session_id_hash(tunnel, session_id);
313 read_lock_bh(&tunnel->hlist_lock);
314 hlist_for_each_entry(session, session_list, hlist) {
315 if (session->session_id == session_id) {
316 l2tp_session_inc_refcount(session);
317 if (do_ref && session->ref)
318 session->ref(session);
319 read_unlock_bh(&tunnel->hlist_lock);
320
321 return session;
322 }
323 }
324 read_unlock_bh(&tunnel->hlist_lock);
325
326 return NULL;
327}
328EXPORT_SYMBOL_GPL(l2tp_session_get);
329
330struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
331 bool do_ref)
282{ 332{
283 int hash; 333 int hash;
284 struct l2tp_session *session; 334 struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
288 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { 338 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
289 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { 339 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
290 if (++count > nth) { 340 if (++count > nth) {
341 l2tp_session_inc_refcount(session);
342 if (do_ref && session->ref)
343 session->ref(session);
291 read_unlock_bh(&tunnel->hlist_lock); 344 read_unlock_bh(&tunnel->hlist_lock);
292 return session; 345 return session;
293 } 346 }
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
298 351
299 return NULL; 352 return NULL;
300} 353}
301EXPORT_SYMBOL_GPL(l2tp_session_find_nth); 354EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
302 355
303/* Lookup a session by interface name. 356/* Lookup a session by interface name.
304 * This is very inefficient but is only used by management interfaces. 357 * This is very inefficient but is only used by management interfaces.
305 */ 358 */
306struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) 359struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
360 bool do_ref)
307{ 361{
308 struct l2tp_net *pn = l2tp_pernet(net); 362 struct l2tp_net *pn = l2tp_pernet(net);
309 int hash; 363 int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
313 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { 367 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
314 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { 368 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
315 if (!strcmp(session->ifname, ifname)) { 369 if (!strcmp(session->ifname, ifname)) {
370 l2tp_session_inc_refcount(session);
371 if (do_ref && session->ref)
372 session->ref(session);
316 rcu_read_unlock_bh(); 373 rcu_read_unlock_bh();
374
317 return session; 375 return session;
318 } 376 }
319 } 377 }
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
323 381
324 return NULL; 382 return NULL;
325} 383}
326EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); 384EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
385
386static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
387 struct l2tp_session *session)
388{
389 struct l2tp_session *session_walk;
390 struct hlist_head *g_head;
391 struct hlist_head *head;
392 struct l2tp_net *pn;
393
394 head = l2tp_session_id_hash(tunnel, session->session_id);
395
396 write_lock_bh(&tunnel->hlist_lock);
397 hlist_for_each_entry(session_walk, head, hlist)
398 if (session_walk->session_id == session->session_id)
399 goto exist;
400
401 if (tunnel->version == L2TP_HDR_VER_3) {
402 pn = l2tp_pernet(tunnel->l2tp_net);
403 g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
404 session->session_id);
405
406 spin_lock_bh(&pn->l2tp_session_hlist_lock);
407 hlist_for_each_entry(session_walk, g_head, global_hlist)
408 if (session_walk->session_id == session->session_id)
409 goto exist_glob;
410
411 hlist_add_head_rcu(&session->global_hlist, g_head);
412 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
413 }
414
415 hlist_add_head(&session->hlist, head);
416 write_unlock_bh(&tunnel->hlist_lock);
417
418 return 0;
419
420exist_glob:
421 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
422exist:
423 write_unlock_bh(&tunnel->hlist_lock);
424
425 return -EEXIST;
426}
327 427
328/* Lookup a tunnel by id 428/* Lookup a tunnel by id
329 */ 429 */
@@ -633,6 +733,9 @@ discard:
633 * a data (not control) frame before coming here. Fields up to the 733 * a data (not control) frame before coming here. Fields up to the
634 * session-id have already been parsed and ptr points to the data 734 * session-id have already been parsed and ptr points to the data
635 * after the session-id. 735 * after the session-id.
736 *
737 * session->ref() must have been called prior to l2tp_recv_common().
738 * session->deref() will be called automatically after skb is processed.
636 */ 739 */
637void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, 740void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
638 unsigned char *ptr, unsigned char *optr, u16 hdrflags, 741 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
642 int offset; 745 int offset;
643 u32 ns, nr; 746 u32 ns, nr;
644 747
645 /* The ref count is increased since we now hold a pointer to
646 * the session. Take care to decrement the refcnt when exiting
647 * this function from now on...
648 */
649 l2tp_session_inc_refcount(session);
650 if (session->ref)
651 (*session->ref)(session);
652
653 /* Parse and check optional cookie */ 748 /* Parse and check optional cookie */
654 if (session->peer_cookie_len > 0) { 749 if (session->peer_cookie_len > 0) {
655 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { 750 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
802 /* Try to dequeue as many skbs from reorder_q as we can. */ 897 /* Try to dequeue as many skbs from reorder_q as we can. */
803 l2tp_recv_dequeue(session); 898 l2tp_recv_dequeue(session);
804 899
805 l2tp_session_dec_refcount(session);
806
807 return; 900 return;
808 901
809discard: 902discard:
@@ -812,8 +905,6 @@ discard:
812 905
813 if (session->deref) 906 if (session->deref)
814 (*session->deref)(session); 907 (*session->deref)(session);
815
816 l2tp_session_dec_refcount(session);
817} 908}
818EXPORT_SYMBOL(l2tp_recv_common); 909EXPORT_SYMBOL(l2tp_recv_common);
819 910
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
920 } 1011 }
921 1012
922 /* Find the session context */ 1013 /* Find the session context */
923 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); 1014 session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
924 if (!session || !session->recv_skb) { 1015 if (!session || !session->recv_skb) {
1016 if (session) {
1017 if (session->deref)
1018 session->deref(session);
1019 l2tp_session_dec_refcount(session);
1020 }
1021
925 /* Not found? Pass to userspace to deal with */ 1022 /* Not found? Pass to userspace to deal with */
926 l2tp_info(tunnel, L2TP_MSG_DATA, 1023 l2tp_info(tunnel, L2TP_MSG_DATA,
927 "%s: no session found (%u/%u). Passing up.\n", 1024 "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
930 } 1027 }
931 1028
932 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); 1029 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
1030 l2tp_session_dec_refcount(session);
933 1031
934 return 0; 1032 return 0;
935 1033
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1738struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) 1836struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1739{ 1837{
1740 struct l2tp_session *session; 1838 struct l2tp_session *session;
1839 int err;
1741 1840
1742 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); 1841 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1743 if (session != NULL) { 1842 if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1793 1892
1794 l2tp_session_set_header_len(session, tunnel->version); 1893 l2tp_session_set_header_len(session, tunnel->version);
1795 1894
1895 err = l2tp_session_add_to_tunnel(tunnel, session);
1896 if (err) {
1897 kfree(session);
1898
1899 return ERR_PTR(err);
1900 }
1901
1796 /* Bump the reference count. The session context is deleted 1902 /* Bump the reference count. The session context is deleted
1797 * only when this drops to zero. 1903 * only when this drops to zero.
1798 */ 1904 */
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1802 /* Ensure tunnel socket isn't deleted */ 1908 /* Ensure tunnel socket isn't deleted */
1803 sock_hold(tunnel->sock); 1909 sock_hold(tunnel->sock);
1804 1910
1805 /* Add session to the tunnel's hash list */
1806 write_lock_bh(&tunnel->hlist_lock);
1807 hlist_add_head(&session->hlist,
1808 l2tp_session_id_hash(tunnel, session_id));
1809 write_unlock_bh(&tunnel->hlist_lock);
1810
1811 /* And to the global session list if L2TPv3 */
1812 if (tunnel->version != L2TP_HDR_VER_2) {
1813 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1814
1815 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1816 hlist_add_head_rcu(&session->global_hlist,
1817 l2tp_session_id_hash_2(pn, session_id));
1818 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1819 }
1820
1821 /* Ignore management session in session count value */ 1911 /* Ignore management session in session count value */
1822 if (session->session_id != 0) 1912 if (session->session_id != 0)
1823 atomic_inc(&l2tp_session_count); 1913 atomic_inc(&l2tp_session_count);
1914
1915 return session;
1824 } 1916 }
1825 1917
1826 return session; 1918 return ERR_PTR(-ENOMEM);
1827} 1919}
1828EXPORT_SYMBOL_GPL(l2tp_session_create); 1920EXPORT_SYMBOL_GPL(l2tp_session_create);
1829 1921
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index aebf281d09ee..8ce7818c7a9d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,11 +230,16 @@ out:
230 return tunnel; 230 return tunnel;
231} 231}
232 232
233struct l2tp_session *l2tp_session_get(struct net *net,
234 struct l2tp_tunnel *tunnel,
235 u32 session_id, bool do_ref);
233struct l2tp_session *l2tp_session_find(struct net *net, 236struct l2tp_session *l2tp_session_find(struct net *net,
234 struct l2tp_tunnel *tunnel, 237 struct l2tp_tunnel *tunnel,
235 u32 session_id); 238 u32 session_id);
236struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 239struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
237struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 240 bool do_ref);
241struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
242 bool do_ref);
238struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); 243struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
239struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 244struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
240 245
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
53 53
54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) 54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
55{ 55{
56 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 56 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
57 pd->session_idx++; 57 pd->session_idx++;
58 58
59 if (pd->session == NULL) { 59 if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
238 } 238 }
239 239
240 /* Show the tunnel or session context */ 240 /* Show the tunnel or session context */
241 if (pd->session == NULL) 241 if (!pd->session) {
242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel); 242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
243 else 243 } else {
244 l2tp_dfs_seq_session_show(m, pd->session); 244 l2tp_dfs_seq_session_show(m, pd->session);
245 if (pd->session->deref)
246 pd->session->deref(pd->session);
247 l2tp_session_dec_refcount(pd->session);
248 }
245 249
246out: 250out:
247 return 0; 251 return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8bf18a5f66e0..6fd41d7afe1e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
221 goto out; 221 goto out;
222 } 222 }
223 223
224 session = l2tp_session_find(net, tunnel, session_id);
225 if (session) {
226 rc = -EEXIST;
227 goto out;
228 }
229
230 if (cfg->ifname) { 224 if (cfg->ifname) {
231 dev = dev_get_by_name(net, cfg->ifname); 225 dev = dev_get_by_name(net, cfg->ifname);
232 if (dev) { 226 if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
240 234
241 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, 235 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
242 peer_session_id, cfg); 236 peer_session_id, cfg);
243 if (!session) { 237 if (IS_ERR(session)) {
244 rc = -ENOMEM; 238 rc = PTR_ERR(session);
245 goto out; 239 goto out;
246 } 240 }
247 241
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d25038cfd64e..4d322c1b7233 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
143 } 143 }
144 144
145 /* Ok, this is a data packet. Lookup the session. */ 145 /* Ok, this is a data packet. Lookup the session. */
146 session = l2tp_session_find(net, NULL, session_id); 146 session = l2tp_session_get(net, NULL, session_id, true);
147 if (session == NULL) 147 if (!session)
148 goto discard; 148 goto discard;
149 149
150 tunnel = session->tunnel; 150 tunnel = session->tunnel;
151 if (tunnel == NULL) 151 if (!tunnel)
152 goto discard; 152 goto discard_sess;
153 153
154 /* Trace packet contents, if enabled */ 154 /* Trace packet contents, if enabled */
155 if (tunnel->debug & L2TP_MSG_DATA) { 155 if (tunnel->debug & L2TP_MSG_DATA) {
156 length = min(32u, skb->len); 156 length = min(32u, skb->len);
157 if (!pskb_may_pull(skb, length)) 157 if (!pskb_may_pull(skb, length))
158 goto discard; 158 goto discard_sess;
159 159
160 /* Point to L2TP header */ 160 /* Point to L2TP header */
161 optr = ptr = skb->data; 161 optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
165 } 165 }
166 166
167 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); 167 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
168 l2tp_session_dec_refcount(session);
168 169
169 return 0; 170 return 0;
170 171
@@ -178,9 +179,10 @@ pass_up:
178 179
179 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 180 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
180 tunnel = l2tp_tunnel_find(net, tunnel_id); 181 tunnel = l2tp_tunnel_find(net, tunnel_id);
181 if (tunnel != NULL) 182 if (tunnel) {
182 sk = tunnel->sock; 183 sk = tunnel->sock;
183 else { 184 sock_hold(sk);
185 } else {
184 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 186 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
185 187
186 read_lock_bh(&l2tp_ip_lock); 188 read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
202 204
203 return sk_receive_skb(sk, skb, 1); 205 return sk_receive_skb(sk, skb, 1);
204 206
207discard_sess:
208 if (session->deref)
209 session->deref(session);
210 l2tp_session_dec_refcount(session);
211 goto discard;
212
205discard_put: 213discard_put:
206 sock_put(sk); 214 sock_put(sk);
207 215
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a4abcbc4c09a..88b397c30d86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
156 } 156 }
157 157
158 /* Ok, this is a data packet. Lookup the session. */ 158 /* Ok, this is a data packet. Lookup the session. */
159 session = l2tp_session_find(net, NULL, session_id); 159 session = l2tp_session_get(net, NULL, session_id, true);
160 if (session == NULL) 160 if (!session)
161 goto discard; 161 goto discard;
162 162
163 tunnel = session->tunnel; 163 tunnel = session->tunnel;
164 if (tunnel == NULL) 164 if (!tunnel)
165 goto discard; 165 goto discard_sess;
166 166
167 /* Trace packet contents, if enabled */ 167 /* Trace packet contents, if enabled */
168 if (tunnel->debug & L2TP_MSG_DATA) { 168 if (tunnel->debug & L2TP_MSG_DATA) {
169 length = min(32u, skb->len); 169 length = min(32u, skb->len);
170 if (!pskb_may_pull(skb, length)) 170 if (!pskb_may_pull(skb, length))
171 goto discard; 171 goto discard_sess;
172 172
173 /* Point to L2TP header */ 173 /* Point to L2TP header */
174 optr = ptr = skb->data; 174 optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
179 179
180 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, 180 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
181 tunnel->recv_payload_hook); 181 tunnel->recv_payload_hook);
182 l2tp_session_dec_refcount(session);
183
182 return 0; 184 return 0;
183 185
184pass_up: 186pass_up:
@@ -191,9 +193,10 @@ pass_up:
191 193
192 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 194 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
193 tunnel = l2tp_tunnel_find(net, tunnel_id); 195 tunnel = l2tp_tunnel_find(net, tunnel_id);
194 if (tunnel != NULL) 196 if (tunnel) {
195 sk = tunnel->sock; 197 sk = tunnel->sock;
196 else { 198 sock_hold(sk);
199 } else {
197 struct ipv6hdr *iph = ipv6_hdr(skb); 200 struct ipv6hdr *iph = ipv6_hdr(skb);
198 201
199 read_lock_bh(&l2tp_ip6_lock); 202 read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
215 218
216 return sk_receive_skb(sk, skb, 1); 219 return sk_receive_skb(sk, skb, 1);
217 220
221discard_sess:
222 if (session->deref)
223 session->deref(session);
224 l2tp_session_dec_refcount(session);
225 goto discard;
226
218discard_put: 227discard_put:
219 sock_put(sk); 228 sock_put(sk);
220 229
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 3620fba31786..7e3e669baac4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
48/* Accessed under genl lock */ 48/* Accessed under genl lock */
49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; 49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
50 50
51static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) 51static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
52 bool do_ref)
52{ 53{
53 u32 tunnel_id; 54 u32 tunnel_id;
54 u32 session_id; 55 u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
59 60
60 if (info->attrs[L2TP_ATTR_IFNAME]) { 61 if (info->attrs[L2TP_ATTR_IFNAME]) {
61 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); 62 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
62 session = l2tp_session_find_by_ifname(net, ifname); 63 session = l2tp_session_get_by_ifname(net, ifname, do_ref);
63 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && 64 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
64 (info->attrs[L2TP_ATTR_CONN_ID])) { 65 (info->attrs[L2TP_ATTR_CONN_ID])) {
65 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); 66 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
66 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); 67 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
67 tunnel = l2tp_tunnel_find(net, tunnel_id); 68 tunnel = l2tp_tunnel_find(net, tunnel_id);
68 if (tunnel) 69 if (tunnel)
69 session = l2tp_session_find(net, tunnel, session_id); 70 session = l2tp_session_get(net, tunnel, session_id,
71 do_ref);
70 } 72 }
71 73
72 return session; 74 return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
642 session_id, peer_session_id, &cfg); 644 session_id, peer_session_id, &cfg);
643 645
644 if (ret >= 0) { 646 if (ret >= 0) {
645 session = l2tp_session_find(net, tunnel, session_id); 647 session = l2tp_session_get(net, tunnel, session_id, false);
646 if (session) 648 if (session) {
647 ret = l2tp_session_notify(&l2tp_nl_family, info, session, 649 ret = l2tp_session_notify(&l2tp_nl_family, info, session,
648 L2TP_CMD_SESSION_CREATE); 650 L2TP_CMD_SESSION_CREATE);
651 l2tp_session_dec_refcount(session);
652 }
649 } 653 }
650 654
651out: 655out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
658 struct l2tp_session *session; 662 struct l2tp_session *session;
659 u16 pw_type; 663 u16 pw_type;
660 664
661 session = l2tp_nl_session_find(info); 665 session = l2tp_nl_session_get(info, true);
662 if (session == NULL) { 666 if (session == NULL) {
663 ret = -ENODEV; 667 ret = -ENODEV;
664 goto out; 668 goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
672 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) 676 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
673 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); 677 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
674 678
679 if (session->deref)
680 session->deref(session);
681 l2tp_session_dec_refcount(session);
682
675out: 683out:
676 return ret; 684 return ret;
677} 685}
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
681 int ret = 0; 689 int ret = 0;
682 struct l2tp_session *session; 690 struct l2tp_session *session;
683 691
684 session = l2tp_nl_session_find(info); 692 session = l2tp_nl_session_get(info, false);
685 if (session == NULL) { 693 if (session == NULL) {
686 ret = -ENODEV; 694 ret = -ENODEV;
687 goto out; 695 goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
716 ret = l2tp_session_notify(&l2tp_nl_family, info, 724 ret = l2tp_session_notify(&l2tp_nl_family, info,
717 session, L2TP_CMD_SESSION_MODIFY); 725 session, L2TP_CMD_SESSION_MODIFY);
718 726
727 l2tp_session_dec_refcount(session);
728
719out: 729out:
720 return ret; 730 return ret;
721} 731}
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
811 struct sk_buff *msg; 821 struct sk_buff *msg;
812 int ret; 822 int ret;
813 823
814 session = l2tp_nl_session_find(info); 824 session = l2tp_nl_session_get(info, false);
815 if (session == NULL) { 825 if (session == NULL) {
816 ret = -ENODEV; 826 ret = -ENODEV;
817 goto out; 827 goto err;
818 } 828 }
819 829
820 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 830 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
821 if (!msg) { 831 if (!msg) {
822 ret = -ENOMEM; 832 ret = -ENOMEM;
823 goto out; 833 goto err_ref;
824 } 834 }
825 835
826 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 836 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
827 0, session, L2TP_CMD_SESSION_GET); 837 0, session, L2TP_CMD_SESSION_GET);
828 if (ret < 0) 838 if (ret < 0)
829 goto err_out; 839 goto err_ref_msg;
830 840
831 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); 841 ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
832 842
833err_out: 843 l2tp_session_dec_refcount(session);
834 nlmsg_free(msg);
835 844
836out: 845 return ret;
846
847err_ref_msg:
848 nlmsg_free(msg);
849err_ref:
850 l2tp_session_dec_refcount(session);
851err:
837 return ret; 852 return ret;
838} 853}
839 854
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
852 goto out; 867 goto out;
853 } 868 }
854 869
855 session = l2tp_session_find_nth(tunnel, si); 870 session = l2tp_session_get_nth(tunnel, si, false);
856 if (session == NULL) { 871 if (session == NULL) {
857 ti++; 872 ti++;
858 tunnel = NULL; 873 tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
862 877
863 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, 878 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
864 cb->nlh->nlmsg_seq, NLM_F_MULTI, 879 cb->nlh->nlmsg_seq, NLM_F_MULTI,
865 session, L2TP_CMD_SESSION_GET) < 0) 880 session, L2TP_CMD_SESSION_GET) < 0) {
881 l2tp_session_dec_refcount(session);
866 break; 882 break;
883 }
884 l2tp_session_dec_refcount(session);
867 885
868 si++; 886 si++;
869 } 887 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 36cc56fd0418..32ea0f3d868c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
450static void pppol2tp_session_destruct(struct sock *sk) 450static void pppol2tp_session_destruct(struct sock *sk)
451{ 451{
452 struct l2tp_session *session = sk->sk_user_data; 452 struct l2tp_session *session = sk->sk_user_data;
453
454 skb_queue_purge(&sk->sk_receive_queue);
455 skb_queue_purge(&sk->sk_write_queue);
456
453 if (session) { 457 if (session) {
454 sk->sk_user_data = NULL; 458 sk->sk_user_data = NULL;
455 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 459 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
488 l2tp_session_queue_purge(session); 492 l2tp_session_queue_purge(session);
489 sock_put(sk); 493 sock_put(sk);
490 } 494 }
491 skb_queue_purge(&sk->sk_receive_queue);
492 skb_queue_purge(&sk->sk_write_queue);
493
494 release_sock(sk); 495 release_sock(sk);
495 496
496 /* This will delete the session context via 497 /* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
582 int error = 0; 583 int error = 0;
583 u32 tunnel_id, peer_tunnel_id; 584 u32 tunnel_id, peer_tunnel_id;
584 u32 session_id, peer_session_id; 585 u32 session_id, peer_session_id;
586 bool drop_refcnt = false;
585 int ver = 2; 587 int ver = 2;
586 int fd; 588 int fd;
587 589
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
683 if (tunnel->peer_tunnel_id == 0) 685 if (tunnel->peer_tunnel_id == 0)
684 tunnel->peer_tunnel_id = peer_tunnel_id; 686 tunnel->peer_tunnel_id = peer_tunnel_id;
685 687
686 /* Create session if it doesn't already exist. We handle the 688 session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
687 * case where a session was previously created by the netlink 689 if (session) {
688 * interface by checking that the session doesn't already have 690 drop_refcnt = true;
689 * a socket and its tunnel socket are what we expect. If any 691 ps = l2tp_session_priv(session);
690 * of those checks fail, return EEXIST to the caller. 692
691 */ 693 /* Using a pre-existing session is fine as long as it hasn't
692 session = l2tp_session_find(sock_net(sk), tunnel, session_id); 694 * been connected yet.
693 if (session == NULL) {
694 /* Default MTU must allow space for UDP/L2TP/PPP
695 * headers.
696 */ 695 */
697 cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; 696 if (ps->sock) {
697 error = -EEXIST;
698 goto end;
699 }
698 700
699 /* Allocate and initialize a new session context. */ 701 /* consistency checks */
700 session = l2tp_session_create(sizeof(struct pppol2tp_session), 702 if (ps->tunnel_sock != tunnel->sock) {
701 tunnel, session_id, 703 error = -EEXIST;
702 peer_session_id, &cfg);
703 if (session == NULL) {
704 error = -ENOMEM;
705 goto end; 704 goto end;
706 } 705 }
707 } else { 706 } else {
708 ps = l2tp_session_priv(session); 707 /* Default MTU must allow space for UDP/L2TP/PPP headers */
709 error = -EEXIST; 708 cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
710 if (ps->sock != NULL) 709 cfg.mru = cfg.mtu;
711 goto end;
712 710
713 /* consistency checks */ 711 session = l2tp_session_create(sizeof(struct pppol2tp_session),
714 if (ps->tunnel_sock != tunnel->sock) 712 tunnel, session_id,
713 peer_session_id, &cfg);
714 if (IS_ERR(session)) {
715 error = PTR_ERR(session);
715 goto end; 716 goto end;
717 }
716 } 718 }
717 719
718 /* Associate session with its PPPoL2TP socket */ 720 /* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
777 session->name); 779 session->name);
778 780
779end: 781end:
782 if (drop_refcnt)
783 l2tp_session_dec_refcount(session);
780 release_sock(sk); 784 release_sock(sk);
781 785
782 return error; 786 return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
804 if (tunnel->sock == NULL) 808 if (tunnel->sock == NULL)
805 goto out; 809 goto out;
806 810
807 /* Check that this session doesn't already exist */
808 error = -EEXIST;
809 session = l2tp_session_find(net, tunnel, session_id);
810 if (session != NULL)
811 goto out;
812
813 /* Default MTU values. */ 811 /* Default MTU values. */
814 if (cfg->mtu == 0) 812 if (cfg->mtu == 0)
815 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; 813 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
817 cfg->mru = cfg->mtu; 815 cfg->mru = cfg->mtu;
818 816
819 /* Allocate and initialize a new session context. */ 817 /* Allocate and initialize a new session context. */
820 error = -ENOMEM;
821 session = l2tp_session_create(sizeof(struct pppol2tp_session), 818 session = l2tp_session_create(sizeof(struct pppol2tp_session),
822 tunnel, session_id, 819 tunnel, session_id,
823 peer_session_id, cfg); 820 peer_session_id, cfg);
824 if (session == NULL) 821 if (IS_ERR(session)) {
822 error = PTR_ERR(session);
825 goto out; 823 goto out;
824 }
826 825
827 ps = l2tp_session_priv(session); 826 ps = l2tp_session_priv(session);
828 ps->tunnel_sock = tunnel->sock; 827 ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1140 if (stats.session_id != 0) { 1139 if (stats.session_id != 0) {
1141 /* resend to session ioctl handler */ 1140 /* resend to session ioctl handler */
1142 struct l2tp_session *session = 1141 struct l2tp_session *session =
1143 l2tp_session_find(sock_net(sk), tunnel, stats.session_id); 1142 l2tp_session_get(sock_net(sk), tunnel,
1144 if (session != NULL) 1143 stats.session_id, true);
1145 err = pppol2tp_session_ioctl(session, cmd, arg); 1144
1146 else 1145 if (session) {
1146 err = pppol2tp_session_ioctl(session, cmd,
1147 arg);
1148 if (session->deref)
1149 session->deref(session);
1150 l2tp_session_dec_refcount(session);
1151 } else {
1147 err = -EBADR; 1152 err = -EBADR;
1153 }
1148 break; 1154 break;
1149 } 1155 }
1150#ifdef CONFIG_XFRM 1156#ifdef CONFIG_XFRM
@@ -1377,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1377 } else 1383 } else
1378 err = pppol2tp_session_setsockopt(sk, session, optname, val); 1384 err = pppol2tp_session_setsockopt(sk, session, optname, val);
1379 1385
1380 err = 0;
1381
1382end_put_sess: 1386end_put_sess:
1383 sock_put(sk); 1387 sock_put(sk);
1384end: 1388end:
@@ -1501,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1501 1505
1502 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); 1506 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
1503 sock_put(ps->tunnel_sock); 1507 sock_put(ps->tunnel_sock);
1504 } else 1508 if (err)
1509 goto end_put_sess;
1510 } else {
1505 err = pppol2tp_session_getsockopt(sk, session, optname, &val); 1511 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
1512 if (err)
1513 goto end_put_sess;
1514 }
1506 1515
1507 err = -EFAULT; 1516 err = -EFAULT;
1508 if (put_user(len, optlen)) 1517 if (put_user(len, optlen))
@@ -1554,7 +1563,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1554 1563
1555static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) 1564static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1556{ 1565{
1557 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 1566 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
1558 pd->session_idx++; 1567 pd->session_idx++;
1559 1568
1560 if (pd->session == NULL) { 1569 if (pd->session == NULL) {
@@ -1681,10 +1690,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
1681 1690
1682 /* Show the tunnel or session context. 1691 /* Show the tunnel or session context.
1683 */ 1692 */
1684 if (pd->session == NULL) 1693 if (!pd->session) {
1685 pppol2tp_seq_tunnel_show(m, pd->tunnel); 1694 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1686 else 1695 } else {
1687 pppol2tp_seq_session_show(m, pd->session); 1696 pppol2tp_seq_session_show(m, pd->session);
1697 if (pd->session->deref)
1698 pd->session->deref(pd->session);
1699 l2tp_session_dec_refcount(pd->session);
1700 }
1688 1701
1689out: 1702out:
1690 return 0; 1703 return 0;
@@ -1843,4 +1856,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
1843MODULE_LICENSE("GPL"); 1856MODULE_LICENSE("GPL");
1844MODULE_VERSION(PPPOL2TP_DRV_VERSION); 1857MODULE_VERSION(PPPOL2TP_DRV_VERSION);
1845MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP); 1858MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
1846MODULE_ALIAS_L2TP_PWTYPE(11); 1859MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 40813dd3301c..5bb0c5012819 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
718 ieee80211_recalc_ps(local); 718 ieee80211_recalc_ps(local);
719 719
720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
722 local->ops->wake_tx_queue) {
722 /* XXX: for AP_VLAN, actually track AP queues */ 723 /* XXX: for AP_VLAN, actually track AP queues */
723 netif_tx_start_all_queues(dev); 724 netif_tx_start_all_queues(dev);
724 } else if (dev) { 725 } else if (dev) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e48724a6725e..4d7543d1a62c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
208 return len; 208 return len;
209} 209}
210 210
211static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
212 struct sk_buff *skb,
213 int rtap_vendor_space)
214{
215 struct {
216 struct ieee80211_hdr_3addr hdr;
217 u8 category;
218 u8 action_code;
219 } __packed action;
220
221 if (!sdata)
222 return;
223
224 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
225
226 if (skb->len < rtap_vendor_space + sizeof(action) +
227 VHT_MUMIMO_GROUPS_DATA_LEN)
228 return;
229
230 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
231 return;
232
233 skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
234
235 if (!ieee80211_is_action(action.hdr.frame_control))
236 return;
237
238 if (action.category != WLAN_CATEGORY_VHT)
239 return;
240
241 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
242 return;
243
244 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
245 return;
246
247 skb = skb_copy(skb, GFP_ATOMIC);
248 if (!skb)
249 return;
250
251 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
252 skb_queue_tail(&sdata->skb_queue, skb);
253 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
254}
255
211/* 256/*
212 * ieee80211_add_rx_radiotap_header - add radiotap header 257 * ieee80211_add_rx_radiotap_header - add radiotap header
213 * 258 *
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
515 struct net_device *prev_dev = NULL; 560 struct net_device *prev_dev = NULL;
516 int present_fcs_len = 0; 561 int present_fcs_len = 0;
517 unsigned int rtap_vendor_space = 0; 562 unsigned int rtap_vendor_space = 0;
518 struct ieee80211_mgmt *mgmt;
519 struct ieee80211_sub_if_data *monitor_sdata = 563 struct ieee80211_sub_if_data *monitor_sdata =
520 rcu_dereference(local->monitor_sdata); 564 rcu_dereference(local->monitor_sdata);
521 565
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
553 return remove_monitor_info(local, origskb, rtap_vendor_space); 597 return remove_monitor_info(local, origskb, rtap_vendor_space);
554 } 598 }
555 599
600 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
601
556 /* room for the radiotap header based on driver features */ 602 /* room for the radiotap header based on driver features */
557 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); 603 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
558 needed_headroom = rt_hdrlen - rtap_vendor_space; 604 needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
618 ieee80211_rx_stats(sdata->dev, skb->len); 664 ieee80211_rx_stats(sdata->dev, skb->len);
619 } 665 }
620 666
621 mgmt = (void *)skb->data;
622 if (monitor_sdata &&
623 skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
624 ieee80211_is_action(mgmt->frame_control) &&
625 mgmt->u.action.category == WLAN_CATEGORY_VHT &&
626 mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
627 is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
628 ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
629 struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
630
631 if (mu_skb) {
632 mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
633 skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
634 ieee80211_queue_work(&local->hw, &monitor_sdata->work);
635 }
636 }
637
638 if (prev_dev) { 667 if (prev_dev) {
639 skb->dev = prev_dev; 668 skb->dev = prev_dev;
640 netif_receive_skb(skb); 669 netif_receive_skb(skb);
@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3610 !ether_addr_equal(bssid, hdr->addr1)) 3639 !ether_addr_equal(bssid, hdr->addr1))
3611 return false; 3640 return false;
3612 } 3641 }
3642
3643 /*
3644 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3645 * the BSSID - we've checked that already but may have accepted
3646 * the wildcard (ff:ff:ff:ff:ff:ff).
3647 *
3648 * It also says:
3649 * The BSSID of the Data frame is determined as follows:
3650 * a) If the STA is contained within an AP or is associated
3651 * with an AP, the BSSID is the address currently in use
3652 * by the STA contained in the AP.
3653 *
3654 * So we should not accept data frames with an address that's
3655 * multicast.
3656 *
3657 * Accepting it also opens a security problem because stations
3658 * could encrypt it with the GTK and inject traffic that way.
3659 */
3660 if (ieee80211_is_data(hdr->frame_control) && multicast)
3661 return false;
3662
3613 return true; 3663 return true;
3614 case NL80211_IFTYPE_WDS: 3664 case NL80211_IFTYPE_WDS:
3615 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3665 if (bssid || !ieee80211_is_data(hdr->frame_control))
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 33211f9a2656..6414079aa729 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
1269{ 1269{
1270 struct mpls_route __rcu **platform_label; 1270 struct mpls_route __rcu **platform_label;
1271 struct net *net = dev_net(dev); 1271 struct net *net = dev_net(dev);
1272 unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
1273 unsigned int alive;
1272 unsigned index; 1274 unsigned index;
1273 1275
1274 platform_label = rtnl_dereference(net->mpls.platform_label); 1276 platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
1278 if (!rt) 1280 if (!rt)
1279 continue; 1281 continue;
1280 1282
1283 alive = 0;
1281 change_nexthops(rt) { 1284 change_nexthops(rt) {
1282 if (rtnl_dereference(nh->nh_dev) != dev) 1285 if (rtnl_dereference(nh->nh_dev) != dev)
1283 continue; 1286 goto next;
1287
1284 switch (event) { 1288 switch (event) {
1285 case NETDEV_DOWN: 1289 case NETDEV_DOWN:
1286 case NETDEV_UNREGISTER: 1290 case NETDEV_UNREGISTER:
@@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
1288 /* fall through */ 1292 /* fall through */
1289 case NETDEV_CHANGE: 1293 case NETDEV_CHANGE:
1290 nh->nh_flags |= RTNH_F_LINKDOWN; 1294 nh->nh_flags |= RTNH_F_LINKDOWN;
1291 if (event != NETDEV_UNREGISTER)
1292 ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
1293 break; 1295 break;
1294 } 1296 }
1295 if (event == NETDEV_UNREGISTER) 1297 if (event == NETDEV_UNREGISTER)
1296 RCU_INIT_POINTER(nh->nh_dev, NULL); 1298 RCU_INIT_POINTER(nh->nh_dev, NULL);
1299next:
1300 if (!(nh->nh_flags & nh_flags))
1301 alive++;
1297 } endfor_nexthops(rt); 1302 } endfor_nexthops(rt);
1303
1304 WRITE_ONCE(rt->rt_nhn_alive, alive);
1298 } 1305 }
1299} 1306}
1300 1307
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97fcbefb..ffb78e5f7b70 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
181unsigned int nf_conntrack_max __read_mostly; 181unsigned int nf_conntrack_max __read_mostly;
182seqcount_t nf_conntrack_generation __read_mostly; 182seqcount_t nf_conntrack_generation __read_mostly;
183 183
184DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 184/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
185 * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
186 * alignment to enforce this.
187 */
188DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
185EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); 189EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
186 190
187static unsigned int nf_conntrack_hash_rnd __read_mostly; 191static unsigned int nf_conntrack_hash_rnd __read_mostly;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d56e66..22fc32143e9c 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
290 BUG_ON(notify != new); 290 BUG_ON(notify != new);
291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); 291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
292 mutex_unlock(&nf_ct_ecache_mutex); 292 mutex_unlock(&nf_ct_ecache_mutex);
293 /* synchronize_rcu() is called from ctnetlink_exit. */
293} 294}
294EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 295EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
295 296
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
326 BUG_ON(notify != new); 327 BUG_ON(notify != new);
327 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); 328 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
328 mutex_unlock(&nf_ct_ecache_mutex); 329 mutex_unlock(&nf_ct_ecache_mutex);
330 /* synchronize_rcu() is called from ctnetlink_exit. */
329} 331}
330EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 332EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
331 333
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4b2e1fb28bb4..d80073037856 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
57 hlist_del_rcu(&exp->hnode); 57 hlist_del_rcu(&exp->hnode);
58 net->ct.expect_count--; 58 net->ct.expect_count--;
59 59
60 hlist_del(&exp->lnode); 60 hlist_del_rcu(&exp->lnode);
61 master_help->expecting[exp->class]--; 61 master_help->expecting[exp->class]--;
62 62
63 nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report); 63 nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
363 /* two references : one for hash insert, one for the timer */ 363 /* two references : one for hash insert, one for the timer */
364 atomic_add(2, &exp->use); 364 atomic_add(2, &exp->use);
365 365
366 hlist_add_head(&exp->lnode, &master_help->expectations); 366 hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
367 master_help->expecting[exp->class]++; 367 master_help->expecting[exp->class]++;
368 368
369 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); 369 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00c2492..008299b7f78f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
53 53
54 rcu_read_lock(); 54 rcu_read_lock();
55 t = rcu_dereference(nf_ct_ext_types[id]); 55 t = rcu_dereference(nf_ct_ext_types[id]);
56 BUG_ON(t == NULL); 56 if (!t) {
57 rcu_read_unlock();
58 return NULL;
59 }
60
57 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 61 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
58 len = off + t->len + var_alloc_len; 62 len = off + t->len + var_alloc_len;
59 alloc_size = t->alloc_size + var_alloc_len; 63 alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
88 92
89 rcu_read_lock(); 93 rcu_read_lock();
90 t = rcu_dereference(nf_ct_ext_types[id]); 94 t = rcu_dereference(nf_ct_ext_types[id]);
91 BUG_ON(t == NULL); 95 if (!t) {
96 rcu_read_unlock();
97 return NULL;
98 }
92 99
93 newoff = ALIGN(old->len, t->align); 100 newoff = ALIGN(old->len, t->align);
94 newlen = newoff + t->len + var_alloc_len; 101 newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
175 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); 182 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
176 update_alloc_size(type); 183 update_alloc_size(type);
177 mutex_unlock(&nf_ct_ext_type_mutex); 184 mutex_unlock(&nf_ct_ext_type_mutex);
178 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 185 synchronize_rcu();
179} 186}
180EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); 187EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 6dc44d9b4190..4eeb3418366a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
158{ 158{
159 struct nf_conntrack_helper *h; 159 struct nf_conntrack_helper *h;
160 160
161 rcu_read_lock();
162
161 h = __nf_conntrack_helper_find(name, l3num, protonum); 163 h = __nf_conntrack_helper_find(name, l3num, protonum);
162#ifdef CONFIG_MODULES 164#ifdef CONFIG_MODULES
163 if (h == NULL) { 165 if (h == NULL) {
164 if (request_module("nfct-helper-%s", name) == 0) 166 rcu_read_unlock();
167 if (request_module("nfct-helper-%s", name) == 0) {
168 rcu_read_lock();
165 h = __nf_conntrack_helper_find(name, l3num, protonum); 169 h = __nf_conntrack_helper_find(name, l3num, protonum);
170 } else {
171 return h;
172 }
166 } 173 }
167#endif 174#endif
168 if (h != NULL && !try_module_get(h->me)) 175 if (h != NULL && !try_module_get(h->me))
169 h = NULL; 176 h = NULL;
170 177
178 rcu_read_unlock();
179
171 return h; 180 return h;
172} 181}
173EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); 182EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
311} 320}
312EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); 321EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
313 322
323/* Caller should hold the rcu lock */
314struct nf_ct_helper_expectfn * 324struct nf_ct_helper_expectfn *
315nf_ct_helper_expectfn_find_by_name(const char *name) 325nf_ct_helper_expectfn_find_by_name(const char *name)
316{ 326{
317 struct nf_ct_helper_expectfn *cur; 327 struct nf_ct_helper_expectfn *cur;
318 bool found = false; 328 bool found = false;
319 329
320 rcu_read_lock();
321 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { 330 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
322 if (!strcmp(cur->name, name)) { 331 if (!strcmp(cur->name, name)) {
323 found = true; 332 found = true;
324 break; 333 break;
325 } 334 }
326 } 335 }
327 rcu_read_unlock();
328 return found ? cur : NULL; 336 return found ? cur : NULL;
329} 337}
330EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); 338EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
331 339
340/* Caller should hold the rcu lock */
332struct nf_ct_helper_expectfn * 341struct nf_ct_helper_expectfn *
333nf_ct_helper_expectfn_find_by_symbol(const void *symbol) 342nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
334{ 343{
335 struct nf_ct_helper_expectfn *cur; 344 struct nf_ct_helper_expectfn *cur;
336 bool found = false; 345 bool found = false;
337 346
338 rcu_read_lock();
339 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { 347 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
340 if (cur->expectfn == symbol) { 348 if (cur->expectfn == symbol) {
341 found = true; 349 found = true;
342 break; 350 break;
343 } 351 }
344 } 352 }
345 rcu_read_unlock();
346 return found ? cur : NULL; 353 return found ? cur : NULL;
347} 354}
348EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); 355EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..dc7dfd68fafe 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
1488 * treat the second attempt as a no-op instead of returning 1488 * treat the second attempt as a no-op instead of returning
1489 * an error. 1489 * an error.
1490 */ 1490 */
1491 if (help && help->helper && 1491 err = -EBUSY;
1492 !strcmp(help->helper->name, helpname)) 1492 if (help) {
1493 return 0; 1493 rcu_read_lock();
1494 else 1494 helper = rcu_dereference(help->helper);
1495 return -EBUSY; 1495 if (helper && !strcmp(helper->name, helpname))
1496 err = 0;
1497 rcu_read_unlock();
1498 }
1499
1500 return err;
1496 } 1501 }
1497 1502
1498 if (!strcmp(helpname, "")) { 1503 if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
1929 1934
1930 err = 0; 1935 err = 0;
1931 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1936 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1932 events = IPCT_RELATED; 1937 events = 1 << IPCT_RELATED;
1933 else 1938 else
1934 events = IPCT_NEW; 1939 events = 1 << IPCT_NEW;
1935 1940
1936 if (cda[CTA_LABELS] && 1941 if (cda[CTA_LABELS] &&
1937 ctnetlink_attach_labels(ct, cda) == 0) 1942 ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2675 last = (struct nf_conntrack_expect *)cb->args[1]; 2680 last = (struct nf_conntrack_expect *)cb->args[1];
2676 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 2681 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2677restart: 2682restart:
2678 hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]], 2683 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
2679 hnode) { 2684 hnode) {
2680 if (l3proto && exp->tuple.src.l3num != l3proto) 2685 if (l3proto && exp->tuple.src.l3num != l3proto)
2681 continue; 2686 continue;
2682 2687
@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2727 rcu_read_lock(); 2732 rcu_read_lock();
2728 last = (struct nf_conntrack_expect *)cb->args[1]; 2733 last = (struct nf_conntrack_expect *)cb->args[1];
2729restart: 2734restart:
2730 hlist_for_each_entry(exp, &help->expectations, lnode) { 2735 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
2731 if (l3proto && exp->tuple.src.l3num != l3proto) 2736 if (l3proto && exp->tuple.src.l3num != l3proto)
2732 continue; 2737 continue;
2733 if (cb->args[1]) { 2738 if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
2789 return -ENOENT; 2794 return -ENOENT;
2790 2795
2791 ct = nf_ct_tuplehash_to_ctrack(h); 2796 ct = nf_ct_tuplehash_to_ctrack(h);
2797 /* No expectation linked to this connection tracking. */
2798 if (!nfct_help(ct)) {
2799 nf_ct_put(ct);
2800 return 0;
2801 }
2802
2792 c.data = ct; 2803 c.data = ct;
2793 2804
2794 err = netlink_dump_start(ctnl, skb, nlh, &c); 2805 err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
3133 return -ENOENT; 3144 return -ENOENT;
3134 ct = nf_ct_tuplehash_to_ctrack(h); 3145 ct = nf_ct_tuplehash_to_ctrack(h);
3135 3146
3147 rcu_read_lock();
3136 if (cda[CTA_EXPECT_HELP_NAME]) { 3148 if (cda[CTA_EXPECT_HELP_NAME]) {
3137 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3149 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3138 3150
3139 helper = __nf_conntrack_helper_find(helpname, u3, 3151 helper = __nf_conntrack_helper_find(helpname, u3,
3140 nf_ct_protonum(ct)); 3152 nf_ct_protonum(ct));
3141 if (helper == NULL) { 3153 if (helper == NULL) {
3154 rcu_read_unlock();
3142#ifdef CONFIG_MODULES 3155#ifdef CONFIG_MODULES
3143 if (request_module("nfct-helper-%s", helpname) < 0) { 3156 if (request_module("nfct-helper-%s", helpname) < 0) {
3144 err = -EOPNOTSUPP; 3157 err = -EOPNOTSUPP;
3145 goto err_ct; 3158 goto err_ct;
3146 } 3159 }
3160 rcu_read_lock();
3147 helper = __nf_conntrack_helper_find(helpname, u3, 3161 helper = __nf_conntrack_helper_find(helpname, u3,
3148 nf_ct_protonum(ct)); 3162 nf_ct_protonum(ct));
3149 if (helper) { 3163 if (helper) {
3150 err = -EAGAIN; 3164 err = -EAGAIN;
3151 goto err_ct; 3165 goto err_rcu;
3152 } 3166 }
3167 rcu_read_unlock();
3153#endif 3168#endif
3154 err = -EOPNOTSUPP; 3169 err = -EOPNOTSUPP;
3155 goto err_ct; 3170 goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
3159 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); 3174 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3160 if (IS_ERR(exp)) { 3175 if (IS_ERR(exp)) {
3161 err = PTR_ERR(exp); 3176 err = PTR_ERR(exp);
3162 goto err_ct; 3177 goto err_rcu;
3163 } 3178 }
3164 3179
3165 err = nf_ct_expect_related_report(exp, portid, report); 3180 err = nf_ct_expect_related_report(exp, portid, report);
3166 nf_ct_expect_put(exp); 3181 nf_ct_expect_put(exp);
3182err_rcu:
3183 rcu_read_unlock();
3167err_ct: 3184err_ct:
3168 nf_ct_put(ct); 3185 nf_ct_put(ct);
3169 return err; 3186 return err;
@@ -3442,6 +3459,7 @@ static void __exit ctnetlink_exit(void)
3442#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3459#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3443 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3460 RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3444#endif 3461#endif
3462 synchronize_rcu();
3445} 3463}
3446 3464
3447module_init(ctnetlink_init); 3465module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 94b14c5a8b17..82802e4a6640 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
903#ifdef CONFIG_XFRM 903#ifdef CONFIG_XFRM
904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); 904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
905#endif 905#endif
906 synchronize_rcu();
907
906 for (i = 0; i < NFPROTO_NUMPROTO; i++) 908 for (i = 0; i < NFPROTO_NUMPROTO; i++)
907 kfree(nf_nat_l4protos[i]); 909 kfree(nf_nat_l4protos[i]);
908 910
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 31d358691af0..804e8a0ab36e 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
33 enum nf_nat_manip_type maniptype) 33 enum nf_nat_manip_type maniptype)
34{ 34{
35 sctp_sctphdr_t *hdr; 35 sctp_sctphdr_t *hdr;
36 int hdrsize = 8;
36 37
37 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)) 38 /* This could be an inner header returned in icmp packet; in such
39 * cases we cannot update the checksum field since it is outside
40 * of the 8 bytes of transport layer headers we are guaranteed.
41 */
42 if (skb->len >= hdroff + sizeof(*hdr))
43 hdrsize = sizeof(*hdr);
44
45 if (!skb_make_writable(skb, hdroff + hdrsize))
38 return false; 46 return false;
39 47
40 hdr = (struct sctphdr *)(skb->data + hdroff); 48 hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
47 hdr->dest = tuple->dst.u.sctp.port; 55 hdr->dest = tuple->dst.u.sctp.port;
48 } 56 }
49 57
58 if (hdrsize < sizeof(*hdr))
59 return true;
60
50 if (skb->ip_summed != CHECKSUM_PARTIAL) { 61 if (skb->ip_summed != CHECKSUM_PARTIAL) {
51 hdr->checksum = sctp_compute_cksum(skb, hdroff); 62 hdr->checksum = sctp_compute_cksum(skb, hdroff);
52 skb->ip_summed = CHECKSUM_NONE; 63 skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index d43869879fcf..86067560a318 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
101 rcu_read_lock(); 101 rcu_read_lock();
102 idev = __in6_dev_get(skb->dev); 102 idev = __in6_dev_get(skb->dev);
103 if (idev != NULL) { 103 if (idev != NULL) {
104 read_lock_bh(&idev->lock);
104 list_for_each_entry(ifa, &idev->addr_list, if_list) { 105 list_for_each_entry(ifa, &idev->addr_list, if_list) {
105 newdst = ifa->addr; 106 newdst = ifa->addr;
106 addr = true; 107 addr = true;
107 break; 108 break;
108 } 109 }
110 read_unlock_bh(&idev->lock);
109 } 111 }
110 rcu_read_unlock(); 112 rcu_read_unlock();
111 113
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd5bb37..434c739dfeca 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
3145 iter.count = 0; 3145 iter.count = 0;
3146 iter.err = 0; 3146 iter.err = 0;
3147 iter.fn = nf_tables_bind_check_setelem; 3147 iter.fn = nf_tables_bind_check_setelem;
3148 iter.flush = false;
3149 3148
3150 set->ops->walk(ctx, set, &iter); 3149 set->ops->walk(ctx, set, &iter);
3151 if (iter.err < 0) 3150 if (iter.err < 0)
@@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3399 args.iter.count = 0; 3398 args.iter.count = 0;
3400 args.iter.err = 0; 3399 args.iter.err = 0;
3401 args.iter.fn = nf_tables_dump_setelem; 3400 args.iter.fn = nf_tables_dump_setelem;
3402 args.iter.flush = false;
3403 set->ops->walk(&ctx, set, &args.iter); 3401 set->ops->walk(&ctx, set, &args.iter);
3404 3402
3405 nla_nest_end(skb, nest); 3403 nla_nest_end(skb, nest);
@@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3963 struct nft_set_iter iter = { 3961 struct nft_set_iter iter = {
3964 .genmask = genmask, 3962 .genmask = genmask,
3965 .fn = nft_flush_set, 3963 .fn = nft_flush_set,
3966 .flush = true,
3967 }; 3964 };
3968 set->ops->walk(&ctx, set, &iter); 3965 set->ops->walk(&ctx, set, &iter);
3969 3966
@@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5114 iter.count = 0; 5111 iter.count = 0;
5115 iter.err = 0; 5112 iter.err = 0;
5116 iter.fn = nf_tables_loop_check_setelem; 5113 iter.fn = nf_tables_loop_check_setelem;
5117 iter.flush = false;
5118 5114
5119 set->ops->walk(ctx, set, &iter); 5115 set->ops->walk(ctx, set, &iter);
5120 if (iter.err < 0) 5116 if (iter.err < 0)
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de8782345c86..d45558178da5 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); 33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
34 34
35struct nfnl_cthelper {
36 struct list_head list;
37 struct nf_conntrack_helper helper;
38};
39
40static LIST_HEAD(nfnl_cthelper_list);
41
35static int 42static int
36nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, 43nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
37 struct nf_conn *ct, enum ip_conntrack_info ctinfo) 44 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
161 int i, ret; 168 int i, ret;
162 struct nf_conntrack_expect_policy *expect_policy; 169 struct nf_conntrack_expect_policy *expect_policy;
163 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; 170 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
171 unsigned int class_max;
164 172
165 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, 173 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
166 nfnl_cthelper_expect_policy_set); 174 nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
170 if (!tb[NFCTH_POLICY_SET_NUM]) 178 if (!tb[NFCTH_POLICY_SET_NUM])
171 return -EINVAL; 179 return -EINVAL;
172 180
173 helper->expect_class_max = 181 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
174 ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); 182 if (class_max == 0)
175 183 return -EINVAL;
176 if (helper->expect_class_max != 0 && 184 if (class_max > NF_CT_MAX_EXPECT_CLASSES)
177 helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
178 return -EOVERFLOW; 185 return -EOVERFLOW;
179 186
180 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * 187 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
181 helper->expect_class_max, GFP_KERNEL); 188 class_max, GFP_KERNEL);
182 if (expect_policy == NULL) 189 if (expect_policy == NULL)
183 return -ENOMEM; 190 return -ENOMEM;
184 191
185 for (i=0; i<helper->expect_class_max; i++) { 192 for (i = 0; i < class_max; i++) {
186 if (!tb[NFCTH_POLICY_SET+i]) 193 if (!tb[NFCTH_POLICY_SET+i])
187 goto err; 194 goto err;
188 195
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
191 if (ret < 0) 198 if (ret < 0)
192 goto err; 199 goto err;
193 } 200 }
201
202 helper->expect_class_max = class_max - 1;
194 helper->expect_policy = expect_policy; 203 helper->expect_policy = expect_policy;
195 return 0; 204 return 0;
196err: 205err:
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
203 struct nf_conntrack_tuple *tuple) 212 struct nf_conntrack_tuple *tuple)
204{ 213{
205 struct nf_conntrack_helper *helper; 214 struct nf_conntrack_helper *helper;
215 struct nfnl_cthelper *nfcth;
206 int ret; 216 int ret;
207 217
208 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) 218 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
209 return -EINVAL; 219 return -EINVAL;
210 220
211 helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); 221 nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
212 if (helper == NULL) 222 if (nfcth == NULL)
213 return -ENOMEM; 223 return -ENOMEM;
224 helper = &nfcth->helper;
214 225
215 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); 226 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
216 if (ret < 0) 227 if (ret < 0)
217 goto err; 228 goto err1;
218 229
219 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); 230 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
220 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); 231 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
245 256
246 ret = nf_conntrack_helper_register(helper); 257 ret = nf_conntrack_helper_register(helper);
247 if (ret < 0) 258 if (ret < 0)
248 goto err; 259 goto err2;
249 260
261 list_add_tail(&nfcth->list, &nfnl_cthelper_list);
250 return 0; 262 return 0;
251err: 263err2:
252 kfree(helper); 264 kfree(helper->expect_policy);
265err1:
266 kfree(nfcth);
253 return ret; 267 return ret;
254} 268}
255 269
256static int 270static int
271nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
272 struct nf_conntrack_expect_policy *new_policy,
273 const struct nlattr *attr)
274{
275 struct nlattr *tb[NFCTH_POLICY_MAX + 1];
276 int err;
277
278 err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
279 nfnl_cthelper_expect_pol);
280 if (err < 0)
281 return err;
282
283 if (!tb[NFCTH_POLICY_NAME] ||
284 !tb[NFCTH_POLICY_EXPECT_MAX] ||
285 !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
286 return -EINVAL;
287
288 if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
289 return -EBUSY;
290
291 new_policy->max_expected =
292 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
293 new_policy->timeout =
294 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
295
296 return 0;
297}
298
299static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
300 struct nf_conntrack_helper *helper)
301{
302 struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
303 struct nf_conntrack_expect_policy *policy;
304 int i, err;
305
306 /* Check first that all policy attributes are well-formed, so we don't
307 * leave things in inconsistent state on errors.
308 */
309 for (i = 0; i < helper->expect_class_max + 1; i++) {
310
311 if (!tb[NFCTH_POLICY_SET + i])
312 return -EINVAL;
313
314 err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
315 &new_policy[i],
316 tb[NFCTH_POLICY_SET + i]);
317 if (err < 0)
318 return err;
319 }
320 /* Now we can safely update them. */
321 for (i = 0; i < helper->expect_class_max + 1; i++) {
322 policy = (struct nf_conntrack_expect_policy *)
323 &helper->expect_policy[i];
324 policy->max_expected = new_policy->max_expected;
325 policy->timeout = new_policy->timeout;
326 }
327
328 return 0;
329}
330
331static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
332 const struct nlattr *attr)
333{
334 struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
335 unsigned int class_max;
336 int err;
337
338 err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
339 nfnl_cthelper_expect_policy_set);
340 if (err < 0)
341 return err;
342
343 if (!tb[NFCTH_POLICY_SET_NUM])
344 return -EINVAL;
345
346 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
347 if (helper->expect_class_max + 1 != class_max)
348 return -EBUSY;
349
350 return nfnl_cthelper_update_policy_all(tb, helper);
351}
352
353static int
257nfnl_cthelper_update(const struct nlattr * const tb[], 354nfnl_cthelper_update(const struct nlattr * const tb[],
258 struct nf_conntrack_helper *helper) 355 struct nf_conntrack_helper *helper)
259{ 356{
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
263 return -EBUSY; 360 return -EBUSY;
264 361
265 if (tb[NFCTH_POLICY]) { 362 if (tb[NFCTH_POLICY]) {
266 ret = nfnl_cthelper_parse_expect_policy(helper, 363 ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
267 tb[NFCTH_POLICY]);
268 if (ret < 0) 364 if (ret < 0)
269 return ret; 365 return ret;
270 } 366 }
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
293 const char *helper_name; 389 const char *helper_name;
294 struct nf_conntrack_helper *cur, *helper = NULL; 390 struct nf_conntrack_helper *cur, *helper = NULL;
295 struct nf_conntrack_tuple tuple; 391 struct nf_conntrack_tuple tuple;
296 int ret = 0, i; 392 struct nfnl_cthelper *nlcth;
393 int ret = 0;
297 394
298 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 395 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
299 return -EINVAL; 396 return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
304 if (ret < 0) 401 if (ret < 0)
305 return ret; 402 return ret;
306 403
307 rcu_read_lock(); 404 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
308 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { 405 cur = &nlcth->helper;
309 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
310 406
311 /* skip non-userspace conntrack helpers. */ 407 if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
312 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 408 continue;
313 continue;
314 409
315 if (strncmp(cur->name, helper_name, 410 if ((tuple.src.l3num != cur->tuple.src.l3num ||
316 NF_CT_HELPER_NAME_LEN) != 0) 411 tuple.dst.protonum != cur->tuple.dst.protonum))
317 continue; 412 continue;
318 413
319 if ((tuple.src.l3num != cur->tuple.src.l3num || 414 if (nlh->nlmsg_flags & NLM_F_EXCL)
320 tuple.dst.protonum != cur->tuple.dst.protonum)) 415 return -EEXIST;
321 continue;
322 416
323 if (nlh->nlmsg_flags & NLM_F_EXCL) { 417 helper = cur;
324 ret = -EEXIST; 418 break;
325 goto err;
326 }
327 helper = cur;
328 break;
329 }
330 } 419 }
331 rcu_read_unlock();
332 420
333 if (helper == NULL) 421 if (helper == NULL)
334 ret = nfnl_cthelper_create(tb, &tuple); 422 ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
336 ret = nfnl_cthelper_update(tb, helper); 424 ret = nfnl_cthelper_update(tb, helper);
337 425
338 return ret; 426 return ret;
339err:
340 rcu_read_unlock();
341 return ret;
342} 427}
343 428
344static int 429static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
377 goto nla_put_failure; 462 goto nla_put_failure;
378 463
379 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, 464 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
380 htonl(helper->expect_class_max))) 465 htonl(helper->expect_class_max + 1)))
381 goto nla_put_failure; 466 goto nla_put_failure;
382 467
383 for (i=0; i<helper->expect_class_max; i++) { 468 for (i = 0; i < helper->expect_class_max + 1; i++) {
384 nest_parms2 = nla_nest_start(skb, 469 nest_parms2 = nla_nest_start(skb,
385 (NFCTH_POLICY_SET+i) | NLA_F_NESTED); 470 (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
386 if (nest_parms2 == NULL) 471 if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
502 struct sk_buff *skb, const struct nlmsghdr *nlh, 587 struct sk_buff *skb, const struct nlmsghdr *nlh,
503 const struct nlattr * const tb[]) 588 const struct nlattr * const tb[])
504{ 589{
505 int ret = -ENOENT, i; 590 int ret = -ENOENT;
506 struct nf_conntrack_helper *cur; 591 struct nf_conntrack_helper *cur;
507 struct sk_buff *skb2; 592 struct sk_buff *skb2;
508 char *helper_name = NULL; 593 char *helper_name = NULL;
509 struct nf_conntrack_tuple tuple; 594 struct nf_conntrack_tuple tuple;
595 struct nfnl_cthelper *nlcth;
510 bool tuple_set = false; 596 bool tuple_set = false;
511 597
512 if (nlh->nlmsg_flags & NLM_F_DUMP) { 598 if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
527 tuple_set = true; 613 tuple_set = true;
528 } 614 }
529 615
530 for (i = 0; i < nf_ct_helper_hsize; i++) { 616 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
531 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { 617 cur = &nlcth->helper;
618 if (helper_name &&
619 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
620 continue;
532 621
533 /* skip non-userspace conntrack helpers. */ 622 if (tuple_set &&
534 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 623 (tuple.src.l3num != cur->tuple.src.l3num ||
535 continue; 624 tuple.dst.protonum != cur->tuple.dst.protonum))
625 continue;
536 626
537 if (helper_name && strncmp(cur->name, helper_name, 627 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
538 NF_CT_HELPER_NAME_LEN) != 0) { 628 if (skb2 == NULL) {
539 continue; 629 ret = -ENOMEM;
540 } 630 break;
541 if (tuple_set && 631 }
542 (tuple.src.l3num != cur->tuple.src.l3num ||
543 tuple.dst.protonum != cur->tuple.dst.protonum))
544 continue;
545
546 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
547 if (skb2 == NULL) {
548 ret = -ENOMEM;
549 break;
550 }
551 632
552 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, 633 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
553 nlh->nlmsg_seq, 634 nlh->nlmsg_seq,
554 NFNL_MSG_TYPE(nlh->nlmsg_type), 635 NFNL_MSG_TYPE(nlh->nlmsg_type),
555 NFNL_MSG_CTHELPER_NEW, cur); 636 NFNL_MSG_CTHELPER_NEW, cur);
556 if (ret <= 0) { 637 if (ret <= 0) {
557 kfree_skb(skb2); 638 kfree_skb(skb2);
558 break; 639 break;
559 } 640 }
560 641
561 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, 642 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
562 MSG_DONTWAIT); 643 MSG_DONTWAIT);
563 if (ret > 0) 644 if (ret > 0)
564 ret = 0; 645 ret = 0;
565 646
566 /* this avoids a loop in nfnetlink. */ 647 /* this avoids a loop in nfnetlink. */
567 return ret == -EAGAIN ? -ENOBUFS : ret; 648 return ret == -EAGAIN ? -ENOBUFS : ret;
568 }
569 } 649 }
570 return ret; 650 return ret;
571} 651}
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
576{ 656{
577 char *helper_name = NULL; 657 char *helper_name = NULL;
578 struct nf_conntrack_helper *cur; 658 struct nf_conntrack_helper *cur;
579 struct hlist_node *tmp;
580 struct nf_conntrack_tuple tuple; 659 struct nf_conntrack_tuple tuple;
581 bool tuple_set = false, found = false; 660 bool tuple_set = false, found = false;
582 int i, j = 0, ret; 661 struct nfnl_cthelper *nlcth, *n;
662 int j = 0, ret;
583 663
584 if (tb[NFCTH_NAME]) 664 if (tb[NFCTH_NAME])
585 helper_name = nla_data(tb[NFCTH_NAME]); 665 helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
592 tuple_set = true; 672 tuple_set = true;
593 } 673 }
594 674
595 for (i = 0; i < nf_ct_helper_hsize; i++) { 675 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
596 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 676 cur = &nlcth->helper;
597 hnode) { 677 j++;
598 /* skip non-userspace conntrack helpers. */
599 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
600 continue;
601 678
602 j++; 679 if (helper_name &&
680 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
681 continue;
603 682
604 if (helper_name && strncmp(cur->name, helper_name, 683 if (tuple_set &&
605 NF_CT_HELPER_NAME_LEN) != 0) { 684 (tuple.src.l3num != cur->tuple.src.l3num ||
606 continue; 685 tuple.dst.protonum != cur->tuple.dst.protonum))
607 } 686 continue;
608 if (tuple_set &&
609 (tuple.src.l3num != cur->tuple.src.l3num ||
610 tuple.dst.protonum != cur->tuple.dst.protonum))
611 continue;
612 687
613 found = true; 688 found = true;
614 nf_conntrack_helper_unregister(cur); 689 nf_conntrack_helper_unregister(cur);
615 } 690 kfree(cur->expect_policy);
691
692 list_del(&nlcth->list);
693 kfree(nlcth);
616 } 694 }
695
617 /* Make sure we return success if we flush and there is no helpers */ 696 /* Make sure we return success if we flush and there is no helpers */
618 return (found || j == 0) ? 0 : -ENOENT; 697 return (found || j == 0) ? 0 : -ENOENT;
619} 698}
@@ -662,20 +741,16 @@ err_out:
662static void __exit nfnl_cthelper_exit(void) 741static void __exit nfnl_cthelper_exit(void)
663{ 742{
664 struct nf_conntrack_helper *cur; 743 struct nf_conntrack_helper *cur;
665 struct hlist_node *tmp; 744 struct nfnl_cthelper *nlcth, *n;
666 int i;
667 745
668 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); 746 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
669 747
670 for (i=0; i<nf_ct_helper_hsize; i++) { 748 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
671 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 749 cur = &nlcth->helper;
672 hnode) {
673 /* skip non-userspace conntrack helpers. */
674 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
675 continue;
676 750
677 nf_conntrack_helper_unregister(cur); 751 nf_conntrack_helper_unregister(cur);
678 } 752 kfree(cur->expect_policy);
753 kfree(nlcth);
679 } 754 }
680} 755}
681 756
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e0867e56e..47d6656c9119 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); 647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); 648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
649 synchronize_rcu();
649#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 650#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
650 rcu_barrier();
651} 651}
652 652
653module_init(cttimeout_init); 653module_init(cttimeout_init);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3ee0b8a000a4..933509ebf3d3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
443 skb = alloc_skb(size, GFP_ATOMIC); 443 skb = alloc_skb(size, GFP_ATOMIC);
444 if (!skb) { 444 if (!skb) {
445 skb_tx_error(entskb); 445 skb_tx_error(entskb);
446 return NULL; 446 goto nlmsg_failure;
447 } 447 }
448 448
449 nlh = nlmsg_put(skb, 0, 0, 449 nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
452 if (!nlh) { 452 if (!nlh) {
453 skb_tx_error(entskb); 453 skb_tx_error(entskb);
454 kfree_skb(skb); 454 kfree_skb(skb);
455 return NULL; 455 goto nlmsg_failure;
456 } 456 }
457 nfmsg = nlmsg_data(nlh); 457 nfmsg = nlmsg_data(nlh);
458 nfmsg->nfgen_family = entry->state.pf; 458 nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
598 } 598 }
599 599
600 nlh->nlmsg_len = skb->len; 600 nlh->nlmsg_len = skb->len;
601 if (seclen)
602 security_release_secctx(secdata, seclen);
601 return skb; 603 return skb;
602 604
603nla_put_failure: 605nla_put_failure:
604 skb_tx_error(entskb); 606 skb_tx_error(entskb);
605 kfree_skb(skb); 607 kfree_skb(skb);
606 net_err_ratelimited("nf_queue: error creating packet message\n"); 608 net_err_ratelimited("nf_queue: error creating packet message\n");
609nlmsg_failure:
610 if (seclen)
611 security_release_secctx(secdata, seclen);
607 return NULL; 612 return NULL;
608} 613}
609 614
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bf548a7a71ec..0264258c46fe 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
83 83
84 switch (priv->key) { 84 switch (priv->key) {
85 case NFT_CT_DIRECTION: 85 case NFT_CT_DIRECTION:
86 *dest = CTINFO2DIR(ctinfo); 86 nft_reg_store8(dest, CTINFO2DIR(ctinfo));
87 return; 87 return;
88 case NFT_CT_STATUS: 88 case NFT_CT_STATUS:
89 *dest = ct->status; 89 *dest = ct->status;
@@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
151 return; 151 return;
152 } 152 }
153 case NFT_CT_L3PROTOCOL: 153 case NFT_CT_L3PROTOCOL:
154 *dest = nf_ct_l3num(ct); 154 nft_reg_store8(dest, nf_ct_l3num(ct));
155 return; 155 return;
156 case NFT_CT_PROTOCOL: 156 case NFT_CT_PROTOCOL:
157 *dest = nf_ct_protonum(ct); 157 nft_reg_store8(dest, nf_ct_protonum(ct));
158 return; 158 return;
159#ifdef CONFIG_NF_CONNTRACK_ZONES 159#ifdef CONFIG_NF_CONNTRACK_ZONES
160 case NFT_CT_ZONE: { 160 case NFT_CT_ZONE: {
161 const struct nf_conntrack_zone *zone = nf_ct_zone(ct); 161 const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
162 u16 zoneid;
162 163
163 if (priv->dir < IP_CT_DIR_MAX) 164 if (priv->dir < IP_CT_DIR_MAX)
164 *dest = nf_ct_zone_id(zone, priv->dir); 165 zoneid = nf_ct_zone_id(zone, priv->dir);
165 else 166 else
166 *dest = zone->id; 167 zoneid = zone->id;
167 168
169 nft_reg_store16(dest, zoneid);
168 return; 170 return;
169 } 171 }
170#endif 172#endif
@@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
183 nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); 185 nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
184 return; 186 return;
185 case NFT_CT_PROTO_SRC: 187 case NFT_CT_PROTO_SRC:
186 *dest = (__force __u16)tuple->src.u.all; 188 nft_reg_store16(dest, (__force u16)tuple->src.u.all);
187 return; 189 return;
188 case NFT_CT_PROTO_DST: 190 case NFT_CT_PROTO_DST:
189 *dest = (__force __u16)tuple->dst.u.all; 191 nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
190 return; 192 return;
191 default: 193 default:
192 break; 194 break;
@@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
205 const struct nft_ct *priv = nft_expr_priv(expr); 207 const struct nft_ct *priv = nft_expr_priv(expr);
206 struct sk_buff *skb = pkt->skb; 208 struct sk_buff *skb = pkt->skb;
207 enum ip_conntrack_info ctinfo; 209 enum ip_conntrack_info ctinfo;
208 u16 value = regs->data[priv->sreg]; 210 u16 value = nft_reg_load16(&regs->data[priv->sreg]);
209 struct nf_conn *ct; 211 struct nf_conn *ct;
210 212
211 ct = nf_ct_get(skb, &ctinfo); 213 ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
542 case IP_CT_DIR_REPLY: 544 case IP_CT_DIR_REPLY:
543 break; 545 break;
544 default: 546 default:
545 return -EINVAL; 547 err = -EINVAL;
548 goto err1;
546 } 549 }
547 } 550 }
548 551
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index eb2721af898d..c4dad1254ead 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -21,6 +21,7 @@ struct nft_hash {
21 enum nft_registers sreg:8; 21 enum nft_registers sreg:8;
22 enum nft_registers dreg:8; 22 enum nft_registers dreg:8;
23 u8 len; 23 u8 len;
24 bool autogen_seed:1;
24 u32 modulus; 25 u32 modulus;
25 u32 seed; 26 u32 seed;
26 u32 offset; 27 u32 offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
82 if (priv->offset + priv->modulus - 1 < priv->offset) 83 if (priv->offset + priv->modulus - 1 < priv->offset)
83 return -EOVERFLOW; 84 return -EOVERFLOW;
84 85
85 if (tb[NFTA_HASH_SEED]) 86 if (tb[NFTA_HASH_SEED]) {
86 priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED])); 87 priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
87 else 88 } else {
89 priv->autogen_seed = true;
88 get_random_bytes(&priv->seed, sizeof(priv->seed)); 90 get_random_bytes(&priv->seed, sizeof(priv->seed));
91 }
89 92
90 return nft_validate_register_load(priv->sreg, len) && 93 return nft_validate_register_load(priv->sreg, len) &&
91 nft_validate_register_store(ctx, priv->dreg, NULL, 94 nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
105 goto nla_put_failure; 108 goto nla_put_failure;
106 if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) 109 if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
107 goto nla_put_failure; 110 goto nla_put_failure;
108 if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) 111 if (!priv->autogen_seed &&
112 nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
109 goto nla_put_failure; 113 goto nla_put_failure;
110 if (priv->offset != 0) 114 if (priv->offset != 0)
111 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) 115 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e1f5ca9b423b..7b60e01f38ff 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
45 *dest = skb->len; 45 *dest = skb->len;
46 break; 46 break;
47 case NFT_META_PROTOCOL: 47 case NFT_META_PROTOCOL:
48 *dest = 0; 48 nft_reg_store16(dest, (__force u16)skb->protocol);
49 *(__be16 *)dest = skb->protocol;
50 break; 49 break;
51 case NFT_META_NFPROTO: 50 case NFT_META_NFPROTO:
52 *dest = nft_pf(pkt); 51 nft_reg_store8(dest, nft_pf(pkt));
53 break; 52 break;
54 case NFT_META_L4PROTO: 53 case NFT_META_L4PROTO:
55 if (!pkt->tprot_set) 54 if (!pkt->tprot_set)
56 goto err; 55 goto err;
57 *dest = pkt->tprot; 56 nft_reg_store8(dest, pkt->tprot);
58 break; 57 break;
59 case NFT_META_PRIORITY: 58 case NFT_META_PRIORITY:
60 *dest = skb->priority; 59 *dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
85 case NFT_META_IIFTYPE: 84 case NFT_META_IIFTYPE:
86 if (in == NULL) 85 if (in == NULL)
87 goto err; 86 goto err;
88 *dest = 0; 87 nft_reg_store16(dest, in->type);
89 *(u16 *)dest = in->type;
90 break; 88 break;
91 case NFT_META_OIFTYPE: 89 case NFT_META_OIFTYPE:
92 if (out == NULL) 90 if (out == NULL)
93 goto err; 91 goto err;
94 *dest = 0; 92 nft_reg_store16(dest, out->type);
95 *(u16 *)dest = out->type;
96 break; 93 break;
97 case NFT_META_SKUID: 94 case NFT_META_SKUID:
98 sk = skb_to_full_sk(skb); 95 sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
142#endif 139#endif
143 case NFT_META_PKTTYPE: 140 case NFT_META_PKTTYPE:
144 if (skb->pkt_type != PACKET_LOOPBACK) { 141 if (skb->pkt_type != PACKET_LOOPBACK) {
145 *dest = skb->pkt_type; 142 nft_reg_store8(dest, skb->pkt_type);
146 break; 143 break;
147 } 144 }
148 145
149 switch (nft_pf(pkt)) { 146 switch (nft_pf(pkt)) {
150 case NFPROTO_IPV4: 147 case NFPROTO_IPV4:
151 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 148 if (ipv4_is_multicast(ip_hdr(skb)->daddr))
152 *dest = PACKET_MULTICAST; 149 nft_reg_store8(dest, PACKET_MULTICAST);
153 else 150 else
154 *dest = PACKET_BROADCAST; 151 nft_reg_store8(dest, PACKET_BROADCAST);
155 break; 152 break;
156 case NFPROTO_IPV6: 153 case NFPROTO_IPV6:
157 *dest = PACKET_MULTICAST; 154 nft_reg_store8(dest, PACKET_MULTICAST);
158 break; 155 break;
159 case NFPROTO_NETDEV: 156 case NFPROTO_NETDEV:
160 switch (skb->protocol) { 157 switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
168 goto err; 165 goto err;
169 166
170 if (ipv4_is_multicast(iph->daddr)) 167 if (ipv4_is_multicast(iph->daddr))
171 *dest = PACKET_MULTICAST; 168 nft_reg_store8(dest, PACKET_MULTICAST);
172 else 169 else
173 *dest = PACKET_BROADCAST; 170 nft_reg_store8(dest, PACKET_BROADCAST);
174 171
175 break; 172 break;
176 } 173 }
177 case htons(ETH_P_IPV6): 174 case htons(ETH_P_IPV6):
178 *dest = PACKET_MULTICAST; 175 nft_reg_store8(dest, PACKET_MULTICAST);
179 break; 176 break;
180 default: 177 default:
181 WARN_ON_ONCE(1); 178 WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
230{ 227{
231 const struct nft_meta *meta = nft_expr_priv(expr); 228 const struct nft_meta *meta = nft_expr_priv(expr);
232 struct sk_buff *skb = pkt->skb; 229 struct sk_buff *skb = pkt->skb;
233 u32 value = regs->data[meta->sreg]; 230 u32 *sreg = &regs->data[meta->sreg];
231 u32 value = *sreg;
232 u8 pkt_type;
234 233
235 switch (meta->key) { 234 switch (meta->key) {
236 case NFT_META_MARK: 235 case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
240 skb->priority = value; 239 skb->priority = value;
241 break; 240 break;
242 case NFT_META_PKTTYPE: 241 case NFT_META_PKTTYPE:
243 if (skb->pkt_type != value && 242 pkt_type = nft_reg_load8(sreg);
244 skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) 243
245 skb->pkt_type = value; 244 if (skb->pkt_type != pkt_type &&
245 skb_pkt_type_ok(pkt_type) &&
246 skb_pkt_type_ok(skb->pkt_type))
247 skb->pkt_type = pkt_type;
246 break; 248 break;
247 case NFT_META_NFTRACE: 249 case NFT_META_NFTRACE:
248 skb->nf_trace = !!value; 250 skb->nf_trace = !!value;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 19a7bf3236f9..439e0bd152a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
65 } 65 }
66 66
67 if (priv->sreg_proto_min) { 67 if (priv->sreg_proto_min) {
68 range.min_proto.all = 68 range.min_proto.all = (__force __be16)nft_reg_load16(
69 *(__be16 *)&regs->data[priv->sreg_proto_min]; 69 &regs->data[priv->sreg_proto_min]);
70 range.max_proto.all = 70 range.max_proto.all = (__force __be16)nft_reg_load16(
71 *(__be16 *)&regs->data[priv->sreg_proto_max]; 71 &regs->data[priv->sreg_proto_max]);
72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
73 } 73 }
74 74
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 152d226552c1..8ebbc2940f4c 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -15,6 +15,11 @@
15#include <linux/netfilter/nf_tables.h> 15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables.h> 16#include <net/netfilter/nf_tables.h>
17 17
18struct nft_bitmap_elem {
19 struct list_head head;
20 struct nft_set_ext ext;
21};
22
18/* This bitmap uses two bits to represent one element. These two bits determine 23/* This bitmap uses two bits to represent one element. These two bits determine
19 * the element state in the current and the future generation. 24 * the element state in the current and the future generation.
20 * 25 *
@@ -41,13 +46,22 @@
41 * restore its previous state. 46 * restore its previous state.
42 */ 47 */
43struct nft_bitmap { 48struct nft_bitmap {
44 u16 bitmap_size; 49 struct list_head list;
45 u8 bitmap[]; 50 u16 bitmap_size;
51 u8 bitmap[];
46}; 52};
47 53
48static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) 54static inline void nft_bitmap_location(const struct nft_set *set,
55 const void *key,
56 u32 *idx, u32 *off)
49{ 57{
50 u32 k = (key << 1); 58 u32 k;
59
60 if (set->klen == 2)
61 k = *(u16 *)key;
62 else
63 k = *(u8 *)key;
64 k <<= 1;
51 65
52 *idx = k / BITS_PER_BYTE; 66 *idx = k / BITS_PER_BYTE;
53 *off = k % BITS_PER_BYTE; 67 *off = k % BITS_PER_BYTE;
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
69 u8 genmask = nft_genmask_cur(net); 83 u8 genmask = nft_genmask_cur(net);
70 u32 idx, off; 84 u32 idx, off;
71 85
72 nft_bitmap_location(*key, &idx, &off); 86 nft_bitmap_location(set, key, &idx, &off);
73 87
74 return nft_bitmap_active(priv->bitmap, idx, off, genmask); 88 return nft_bitmap_active(priv->bitmap, idx, off, genmask);
75} 89}
76 90
91static struct nft_bitmap_elem *
92nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
93 u8 genmask)
94{
95 const struct nft_bitmap *priv = nft_set_priv(set);
96 struct nft_bitmap_elem *be;
97
98 list_for_each_entry_rcu(be, &priv->list, head) {
99 if (memcmp(nft_set_ext_key(&be->ext),
100 nft_set_ext_key(&this->ext), set->klen) ||
101 !nft_set_elem_active(&be->ext, genmask))
102 continue;
103
104 return be;
105 }
106 return NULL;
107}
108
77static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, 109static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
78 const struct nft_set_elem *elem, 110 const struct nft_set_elem *elem,
79 struct nft_set_ext **_ext) 111 struct nft_set_ext **ext)
80{ 112{
81 struct nft_bitmap *priv = nft_set_priv(set); 113 struct nft_bitmap *priv = nft_set_priv(set);
82 struct nft_set_ext *ext = elem->priv; 114 struct nft_bitmap_elem *new = elem->priv, *be;
83 u8 genmask = nft_genmask_next(net); 115 u8 genmask = nft_genmask_next(net);
84 u32 idx, off; 116 u32 idx, off;
85 117
86 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 118 be = nft_bitmap_elem_find(set, new, genmask);
87 if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) 119 if (be) {
120 *ext = &be->ext;
88 return -EEXIST; 121 return -EEXIST;
122 }
89 123
124 nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
90 /* Enter 01 state. */ 125 /* Enter 01 state. */
91 priv->bitmap[idx] |= (genmask << off); 126 priv->bitmap[idx] |= (genmask << off);
127 list_add_tail_rcu(&new->head, &priv->list);
92 128
93 return 0; 129 return 0;
94} 130}
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
98 const struct nft_set_elem *elem) 134 const struct nft_set_elem *elem)
99{ 135{
100 struct nft_bitmap *priv = nft_set_priv(set); 136 struct nft_bitmap *priv = nft_set_priv(set);
101 struct nft_set_ext *ext = elem->priv; 137 struct nft_bitmap_elem *be = elem->priv;
102 u8 genmask = nft_genmask_next(net); 138 u8 genmask = nft_genmask_next(net);
103 u32 idx, off; 139 u32 idx, off;
104 140
105 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 141 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
106 /* Enter 00 state. */ 142 /* Enter 00 state. */
107 priv->bitmap[idx] &= ~(genmask << off); 143 priv->bitmap[idx] &= ~(genmask << off);
144 list_del_rcu(&be->head);
108} 145}
109 146
110static void nft_bitmap_activate(const struct net *net, 147static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
112 const struct nft_set_elem *elem) 149 const struct nft_set_elem *elem)
113{ 150{
114 struct nft_bitmap *priv = nft_set_priv(set); 151 struct nft_bitmap *priv = nft_set_priv(set);
115 struct nft_set_ext *ext = elem->priv; 152 struct nft_bitmap_elem *be = elem->priv;
116 u8 genmask = nft_genmask_next(net); 153 u8 genmask = nft_genmask_next(net);
117 u32 idx, off; 154 u32 idx, off;
118 155
119 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 156 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
120 /* Enter 11 state. */ 157 /* Enter 11 state. */
121 priv->bitmap[idx] |= (genmask << off); 158 priv->bitmap[idx] |= (genmask << off);
159 nft_set_elem_change_active(net, set, &be->ext);
122} 160}
123 161
124static bool nft_bitmap_flush(const struct net *net, 162static bool nft_bitmap_flush(const struct net *net,
125 const struct nft_set *set, void *ext) 163 const struct nft_set *set, void *_be)
126{ 164{
127 struct nft_bitmap *priv = nft_set_priv(set); 165 struct nft_bitmap *priv = nft_set_priv(set);
128 u8 genmask = nft_genmask_next(net); 166 u8 genmask = nft_genmask_next(net);
167 struct nft_bitmap_elem *be = _be;
129 u32 idx, off; 168 u32 idx, off;
130 169
131 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 170 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
132 /* Enter 10 state, similar to deactivation. */ 171 /* Enter 10 state, similar to deactivation. */
133 priv->bitmap[idx] &= ~(genmask << off); 172 priv->bitmap[idx] &= ~(genmask << off);
173 nft_set_elem_change_active(net, set, &be->ext);
134 174
135 return true; 175 return true;
136} 176}
137 177
138static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
139 const struct nft_set_elem *elem)
140{
141 struct nft_set_ext_tmpl tmpl;
142 struct nft_set_ext *ext;
143
144 nft_set_ext_prepare(&tmpl);
145 nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
146
147 ext = kzalloc(tmpl.len, GFP_KERNEL);
148 if (!ext)
149 return NULL;
150
151 nft_set_ext_init(ext, &tmpl);
152 memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
153
154 return ext;
155}
156
157static void *nft_bitmap_deactivate(const struct net *net, 178static void *nft_bitmap_deactivate(const struct net *net,
158 const struct nft_set *set, 179 const struct nft_set *set,
159 const struct nft_set_elem *elem) 180 const struct nft_set_elem *elem)
160{ 181{
161 struct nft_bitmap *priv = nft_set_priv(set); 182 struct nft_bitmap *priv = nft_set_priv(set);
183 struct nft_bitmap_elem *this = elem->priv, *be;
162 u8 genmask = nft_genmask_next(net); 184 u8 genmask = nft_genmask_next(net);
163 struct nft_set_ext *ext; 185 u32 idx, off;
164 u32 idx, off, key = 0;
165
166 memcpy(&key, elem->key.val.data, set->klen);
167 nft_bitmap_location(key, &idx, &off);
168 186
169 if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) 187 nft_bitmap_location(set, elem->key.val.data, &idx, &off);
170 return NULL;
171 188
172 /* We have no real set extension since this is a bitmap, allocate this 189 be = nft_bitmap_elem_find(set, this, genmask);
173 * dummy object that is released from the commit/abort path. 190 if (!be)
174 */
175 ext = nft_bitmap_ext_alloc(set, elem);
176 if (!ext)
177 return NULL; 191 return NULL;
178 192
179 /* Enter 10 state. */ 193 /* Enter 10 state. */
180 priv->bitmap[idx] &= ~(genmask << off); 194 priv->bitmap[idx] &= ~(genmask << off);
195 nft_set_elem_change_active(net, set, &be->ext);
181 196
182 return ext; 197 return be;
183} 198}
184 199
185static void nft_bitmap_walk(const struct nft_ctx *ctx, 200static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
187 struct nft_set_iter *iter) 202 struct nft_set_iter *iter)
188{ 203{
189 const struct nft_bitmap *priv = nft_set_priv(set); 204 const struct nft_bitmap *priv = nft_set_priv(set);
190 struct nft_set_ext_tmpl tmpl; 205 struct nft_bitmap_elem *be;
191 struct nft_set_elem elem; 206 struct nft_set_elem elem;
192 struct nft_set_ext *ext; 207
193 int idx, off; 208 list_for_each_entry_rcu(be, &priv->list, head) {
194 u16 key; 209 if (iter->count < iter->skip)
195 210 goto cont;
196 nft_set_ext_prepare(&tmpl); 211 if (!nft_set_elem_active(&be->ext, iter->genmask))
197 nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); 212 goto cont;
198 213
199 for (idx = 0; idx < priv->bitmap_size; idx++) { 214 elem.priv = be;
200 for (off = 0; off < BITS_PER_BYTE; off += 2) { 215
201 if (iter->count < iter->skip) 216 iter->err = iter->fn(ctx, set, iter, &elem);
202 goto cont; 217
203 218 if (iter->err < 0)
204 if (!nft_bitmap_active(priv->bitmap, idx, off, 219 return;
205 iter->genmask))
206 goto cont;
207
208 ext = kzalloc(tmpl.len, GFP_KERNEL);
209 if (!ext) {
210 iter->err = -ENOMEM;
211 return;
212 }
213 nft_set_ext_init(ext, &tmpl);
214 key = ((idx * BITS_PER_BYTE) + off) >> 1;
215 memcpy(nft_set_ext_key(ext), &key, set->klen);
216
217 elem.priv = ext;
218 iter->err = iter->fn(ctx, set, iter, &elem);
219
220 /* On set flush, this dummy extension object is released
221 * from the commit/abort path.
222 */
223 if (!iter->flush)
224 kfree(ext);
225
226 if (iter->err < 0)
227 return;
228cont: 220cont:
229 iter->count++; 221 iter->count++;
230 }
231 } 222 }
232} 223}
233 224
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
258{ 249{
259 struct nft_bitmap *priv = nft_set_priv(set); 250 struct nft_bitmap *priv = nft_set_priv(set);
260 251
252 INIT_LIST_HEAD(&priv->list);
261 priv->bitmap_size = nft_bitmap_size(set->klen); 253 priv->bitmap_size = nft_bitmap_size(set->klen);
262 254
263 return 0; 255 return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
283 275
284static struct nft_set_ops nft_bitmap_ops __read_mostly = { 276static struct nft_set_ops nft_bitmap_ops __read_mostly = {
285 .privsize = nft_bitmap_privsize, 277 .privsize = nft_bitmap_privsize,
278 .elemsize = offsetof(struct nft_bitmap_elem, ext),
286 .estimate = nft_bitmap_estimate, 279 .estimate = nft_bitmap_estimate,
287 .init = nft_bitmap_init, 280 .init = nft_bitmap_init,
288 .destroy = nft_bitmap_destroy, 281 .destroy = nft_bitmap_destroy,
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 27241a767f17..c64aca611ac5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
104 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 104 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
105 tcp_hdrlen = tcph->doff * 4; 105 tcp_hdrlen = tcph->doff * 4;
106 106
107 if (len < tcp_hdrlen) 107 if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
108 return -1; 108 return -1;
109 109
110 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 110 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
152 if (len > tcp_hdrlen) 152 if (len > tcp_hdrlen)
153 return 0; 153 return 0;
154 154
155 /* tcph->doff has 4 bits, do not wrap it to 0 */
156 if (tcp_hdrlen >= 15 * 4)
157 return 0;
158
155 /* 159 /*
156 * MSS Option not found ?! add it.. 160 * MSS Option not found ?! add it..
157 */ 161 */
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 80cb7babeb64..df7f1df00330 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
393 393
394 rcu_read_lock(); 394 rcu_read_lock();
395 indev = __in6_dev_get(skb->dev); 395 indev = __in6_dev_get(skb->dev);
396 if (indev) 396 if (indev) {
397 read_lock_bh(&indev->lock);
397 list_for_each_entry(ifa, &indev->addr_list, if_list) { 398 list_for_each_entry(ifa, &indev->addr_list, if_list) {
398 if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) 399 if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
399 continue; 400 continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
401 laddr = &ifa->addr; 402 laddr = &ifa->addr;
402 break; 403 break;
403 } 404 }
405 read_unlock_bh(&indev->lock);
406 }
404 rcu_read_unlock(); 407 rcu_read_unlock();
405 408
406 return laddr ? laddr : daddr; 409 return laddr ? laddr : daddr;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7b73c7c161a9..596eaff66649 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
96 96
97static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 97static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
98 98
99static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
100
101static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
102 "nlk_cb_mutex-ROUTE",
103 "nlk_cb_mutex-1",
104 "nlk_cb_mutex-USERSOCK",
105 "nlk_cb_mutex-FIREWALL",
106 "nlk_cb_mutex-SOCK_DIAG",
107 "nlk_cb_mutex-NFLOG",
108 "nlk_cb_mutex-XFRM",
109 "nlk_cb_mutex-SELINUX",
110 "nlk_cb_mutex-ISCSI",
111 "nlk_cb_mutex-AUDIT",
112 "nlk_cb_mutex-FIB_LOOKUP",
113 "nlk_cb_mutex-CONNECTOR",
114 "nlk_cb_mutex-NETFILTER",
115 "nlk_cb_mutex-IP6_FW",
116 "nlk_cb_mutex-DNRTMSG",
117 "nlk_cb_mutex-KOBJECT_UEVENT",
118 "nlk_cb_mutex-GENERIC",
119 "nlk_cb_mutex-17",
120 "nlk_cb_mutex-SCSITRANSPORT",
121 "nlk_cb_mutex-ECRYPTFS",
122 "nlk_cb_mutex-RDMA",
123 "nlk_cb_mutex-CRYPTO",
124 "nlk_cb_mutex-SMC",
125 "nlk_cb_mutex-23",
126 "nlk_cb_mutex-24",
127 "nlk_cb_mutex-25",
128 "nlk_cb_mutex-26",
129 "nlk_cb_mutex-27",
130 "nlk_cb_mutex-28",
131 "nlk_cb_mutex-29",
132 "nlk_cb_mutex-30",
133 "nlk_cb_mutex-31",
134 "nlk_cb_mutex-MAX_LINKS"
135};
136
99static int netlink_dump(struct sock *sk); 137static int netlink_dump(struct sock *sk);
100static void netlink_skb_destructor(struct sk_buff *skb); 138static void netlink_skb_destructor(struct sk_buff *skb);
101 139
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
585 } else { 623 } else {
586 nlk->cb_mutex = &nlk->cb_def_mutex; 624 nlk->cb_mutex = &nlk->cb_def_mutex;
587 mutex_init(nlk->cb_mutex); 625 mutex_init(nlk->cb_mutex);
626 lockdep_set_class_and_name(nlk->cb_mutex,
627 nlk_cb_mutex_keys + protocol,
628 nlk_cb_mutex_key_strings[protocol]);
588 } 629 }
589 init_waitqueue_head(&nlk->wait); 630 init_waitqueue_head(&nlk->wait);
590 631
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index fb6e10fdb217..92e0981f7404 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
783 783
784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, 784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
785 cb->nlh->nlmsg_seq, NLM_F_MULTI, 785 cb->nlh->nlmsg_seq, NLM_F_MULTI,
786 skb, CTRL_CMD_NEWFAMILY) < 0) 786 skb, CTRL_CMD_NEWFAMILY) < 0) {
787 n--;
787 break; 788 break;
789 }
788 } 790 }
789 791
790 cb->args[0] = n; 792 cb->args[0] = n;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e0a87776a010..7b2c2fce408a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
643 */ 643 */
644 if (nf_ct_is_confirmed(ct)) 644 if (nf_ct_is_confirmed(ct))
645 nf_ct_delete(ct, 0, 0); 645 nf_ct_delete(ct, 0, 0);
646 else 646
647 nf_conntrack_put(&ct->ct_general); 647 nf_conntrack_put(&ct->ct_general);
648 nf_ct_set(skb, NULL, 0); 648 nf_ct_set(skb, NULL, 0);
649 return false; 649 return false;
650 } 650 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d4bb8eb63f2..3f76cb765e5b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
527 527
528 /* Link layer. */ 528 /* Link layer. */
529 clear_vlan(key); 529 clear_vlan(key);
530 if (key->mac_proto == MAC_PROTO_NONE) { 530 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
531 if (unlikely(eth_type_vlan(skb->protocol))) 531 if (unlikely(eth_type_vlan(skb->protocol)))
532 return -EINVAL; 532 return -EINVAL;
533 533
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
745 745
746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) 746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
747{ 747{
748 return key_extract(skb, key); 748 int res;
749
750 res = key_extract(skb, key);
751 if (!res)
752 key->mac_proto &= ~SW_FLOW_KEY_INVALID;
753
754 return res;
749} 755}
750 756
751static int key_extract_mac_proto(struct sk_buff *skb) 757static int key_extract_mac_proto(struct sk_buff *skb)
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6f5fa50f716d..1105a838bab8 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
604 ipv4 = true; 604 ipv4 = true;
605 break; 605 break;
606 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: 606 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
607 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, 607 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
608 nla_get_in6_addr(a), is_mask); 608 nla_get_in6_addr(a), is_mask);
609 ipv6 = true; 609 ipv6 = true;
610 break; 610 break;
@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
665 tun_flags |= TUNNEL_VXLAN_OPT; 665 tun_flags |= TUNNEL_VXLAN_OPT;
666 opts_type = type; 666 opts_type = type;
667 break; 667 break;
668 case OVS_TUNNEL_KEY_ATTR_PAD:
669 break;
668 default: 670 default:
669 OVS_NLERR(log, "Unknown IP tunnel attribute %d", 671 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
670 type); 672 type);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a0dbe7ca8f72..8489beff5c25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3665 return -EBUSY; 3665 return -EBUSY;
3666 if (copy_from_user(&val, optval, sizeof(val))) 3666 if (copy_from_user(&val, optval, sizeof(val)))
3667 return -EFAULT; 3667 return -EFAULT;
3668 if (val > INT_MAX)
3669 return -EINVAL;
3668 po->tp_reserve = val; 3670 po->tp_reserve = val;
3669 return 0; 3671 return 0;
3670 } 3672 }
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4193 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4195 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4194 goto out; 4196 goto out;
4195 if (po->tp_version >= TPACKET_V3 && 4197 if (po->tp_version >= TPACKET_V3 &&
4196 (int)(req->tp_block_size - 4198 req->tp_block_size <=
4197 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 4199 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4198 goto out; 4200 goto out;
4199 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4201 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4200 po->tp_reserve)) 4202 po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4205 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4207 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4206 if (unlikely(rb->frames_per_block == 0)) 4208 if (unlikely(rb->frames_per_block == 0))
4207 goto out; 4209 goto out;
4210 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4211 goto out;
4208 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4212 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4209 req->tp_frame_nr)) 4213 req->tp_frame_nr))
4210 goto out; 4214 goto out;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index ae5ac175b2be..9da7368b0140 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
658 } 658 }
659 659
660 if (plen != len) { 660 if (plen != len) {
661 skb_pad(skb, plen - len); 661 rc = skb_pad(skb, plen - len);
662 if (rc)
663 goto out_node;
662 skb_put(skb, plen - len); 664 skb_put(skb, plen - len);
663 } 665 }
664 666
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7ec632..b099b64366f3 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
275 rxrpc_conn_retransmit_call(conn, skb); 275 rxrpc_conn_retransmit_call(conn, skb);
276 return 0; 276 return 0;
277 277
278 case RXRPC_PACKET_TYPE_BUSY:
279 /* Just ignore BUSY packets for now. */
280 return 0;
281
278 case RXRPC_PACKET_TYPE_ABORT: 282 case RXRPC_PACKET_TYPE_ABORT:
279 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 283 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
280 &wtmp, sizeof(wtmp)) < 0) 284 &wtmp, sizeof(wtmp)) < 0)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index b70aa57319ea..e05b924618a0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -529,20 +529,20 @@ errout:
529 return err; 529 return err;
530} 530}
531 531
532static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) 532static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
533{ 533{
534 a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL); 534 struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
535 if (!a->act_cookie) 535 if (!c)
536 return -ENOMEM; 536 return NULL;
537 537
538 a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 538 c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
539 if (!a->act_cookie->data) { 539 if (!c->data) {
540 kfree(a->act_cookie); 540 kfree(c);
541 return -ENOMEM; 541 return NULL;
542 } 542 }
543 a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]); 543 c->len = nla_len(tb[TCA_ACT_COOKIE]);
544 544
545 return 0; 545 return c;
546} 546}
547 547
548struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, 548struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
551{ 551{
552 struct tc_action *a; 552 struct tc_action *a;
553 struct tc_action_ops *a_o; 553 struct tc_action_ops *a_o;
554 struct tc_cookie *cookie = NULL;
554 char act_name[IFNAMSIZ]; 555 char act_name[IFNAMSIZ];
555 struct nlattr *tb[TCA_ACT_MAX + 1]; 556 struct nlattr *tb[TCA_ACT_MAX + 1];
556 struct nlattr *kind; 557 struct nlattr *kind;
@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
566 goto err_out; 567 goto err_out;
567 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) 568 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
568 goto err_out; 569 goto err_out;
570 if (tb[TCA_ACT_COOKIE]) {
571 int cklen = nla_len(tb[TCA_ACT_COOKIE]);
572
573 if (cklen > TC_COOKIE_MAX_SIZE)
574 goto err_out;
575
576 cookie = nla_memdup_cookie(tb);
577 if (!cookie) {
578 err = -ENOMEM;
579 goto err_out;
580 }
581 }
569 } else { 582 } else {
570 err = -EINVAL; 583 err = -EINVAL;
571 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) 584 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
604 if (err < 0) 617 if (err < 0)
605 goto err_mod; 618 goto err_mod;
606 619
607 if (tb[TCA_ACT_COOKIE]) { 620 if (name == NULL && tb[TCA_ACT_COOKIE]) {
608 int cklen = nla_len(tb[TCA_ACT_COOKIE]); 621 if (a->act_cookie) {
609 622 kfree(a->act_cookie->data);
610 if (cklen > TC_COOKIE_MAX_SIZE) { 623 kfree(a->act_cookie);
611 err = -EINVAL;
612 tcf_hash_release(a, bind);
613 goto err_mod;
614 }
615
616 if (nla_memdup_cookie(a, tb) < 0) {
617 err = -ENOMEM;
618 tcf_hash_release(a, bind);
619 goto err_mod;
620 } 624 }
625 a->act_cookie = cookie;
621 } 626 }
622 627
623 /* module count goes up only when brand new policy is created 628 /* module count goes up only when brand new policy is created
@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
632err_mod: 637err_mod:
633 module_put(a_o->owner); 638 module_put(a_o->owner);
634err_out: 639err_out:
640 if (cookie) {
641 kfree(cookie->data);
642 kfree(cookie);
643 }
635 return ERR_PTR(err); 644 return ERR_PTR(err);
636} 645}
637 646
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c2e5e8..5334e309f17f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
201 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); 201 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
202 202
203 if (p->set_tc_index) { 203 if (p->set_tc_index) {
204 int wlen = skb_network_offset(skb);
205
204 switch (tc_skb_protocol(skb)) { 206 switch (tc_skb_protocol(skb)) {
205 case htons(ETH_P_IP): 207 case htons(ETH_P_IP):
206 if (skb_cow_head(skb, sizeof(struct iphdr))) 208 wlen += sizeof(struct iphdr);
209 if (!pskb_may_pull(skb, wlen) ||
210 skb_try_make_writable(skb, wlen))
207 goto drop; 211 goto drop;
208 212
209 skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) 213 skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
211 break; 215 break;
212 216
213 case htons(ETH_P_IPV6): 217 case htons(ETH_P_IPV6):
214 if (skb_cow_head(skb, sizeof(struct ipv6hdr))) 218 wlen += sizeof(struct ipv6hdr);
219 if (!pskb_may_pull(skb, wlen) ||
220 skb_try_make_writable(skb, wlen))
215 goto drop; 221 goto drop;
216 222
217 skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) 223 skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b052b27a984e..1a2f9e964330 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
794 } 794 }
795 } 795 }
796#ifdef CONFIG_NET_SCHED 796#ifdef CONFIG_NET_SCHED
797 if (dev->qdisc) 797 if (dev->qdisc != &noop_qdisc)
798 qdisc_hash_add(dev->qdisc); 798 qdisc_hash_add(dev->qdisc);
799#endif 799#endif
800} 800}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2a6835b4562b..a9708da28eb5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
71{ 71{
72 struct net *net = sock_net(sk); 72 struct net *net = sock_net(sk);
73 struct sctp_sock *sp; 73 struct sctp_sock *sp;
74 int i;
75 sctp_paramhdr_t *p; 74 sctp_paramhdr_t *p;
76 int err; 75 int i;
77 76
78 /* Retrieve the SCTP per socket area. */ 77 /* Retrieve the SCTP per socket area. */
79 sp = sctp_sk((struct sock *)sk); 78 sp = sctp_sk((struct sock *)sk);
@@ -247,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
247 if (!sctp_ulpq_init(&asoc->ulpq, asoc)) 246 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
248 goto fail_init; 247 goto fail_init;
249 248
249 if (sctp_stream_new(asoc, gfp))
250 goto fail_init;
251
250 /* Assume that peer would support both address types unless we are 252 /* Assume that peer would support both address types unless we are
251 * told otherwise. 253 * told otherwise.
252 */ 254 */
@@ -264,9 +266,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
264 266
265 /* AUTH related initializations */ 267 /* AUTH related initializations */
266 INIT_LIST_HEAD(&asoc->endpoint_shared_keys); 268 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
267 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); 269 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
268 if (err) 270 goto stream_free;
269 goto fail_init;
270 271
271 asoc->active_key_id = ep->active_key_id; 272 asoc->active_key_id = ep->active_key_id;
272 asoc->prsctp_enable = ep->prsctp_enable; 273 asoc->prsctp_enable = ep->prsctp_enable;
@@ -289,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
289 290
290 return asoc; 291 return asoc;
291 292
293stream_free:
294 sctp_stream_free(asoc->stream);
292fail_init: 295fail_init:
293 sock_put(asoc->base.sk); 296 sock_put(asoc->base.sk);
294 sctp_endpoint_put(asoc->ep); 297 sctp_endpoint_put(asoc->ep);
@@ -1409,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1409/* Update the association's pmtu and frag_point by going through all the 1412/* Update the association's pmtu and frag_point by going through all the
1410 * transports. This routine is called when a transport's PMTU has changed. 1413 * transports. This routine is called when a transport's PMTU has changed.
1411 */ 1414 */
1412void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) 1415void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1413{ 1416{
1414 struct sctp_transport *t; 1417 struct sctp_transport *t;
1415 __u32 pmtu = 0; 1418 __u32 pmtu = 0;
@@ -1421,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1421 list_for_each_entry(t, &asoc->peer.transport_addr_list, 1424 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1422 transports) { 1425 transports) {
1423 if (t->pmtu_pending && t->dst) { 1426 if (t->pmtu_pending && t->dst) {
1424 sctp_transport_update_pmtu(sk, t, 1427 sctp_transport_update_pmtu(
1425 SCTP_TRUNC4(dst_mtu(t->dst))); 1428 t, SCTP_TRUNC4(dst_mtu(t->dst)));
1426 t->pmtu_pending = 0; 1429 t->pmtu_pending = 0;
1427 } 1430 }
1428 if (!pmtu || (t->pathmtu < pmtu)) 1431 if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a28ab20487f..0e06a278d2a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (t->param_flags & SPP_PMTUD_ENABLE) {
403 /* Update transports view of the MTU */ 403 /* Update transports view of the MTU */
404 sctp_transport_update_pmtu(sk, t, pmtu); 404 sctp_transport_update_pmtu(t, pmtu);
405 405
406 /* Update association pmtu. */ 406 /* Update association pmtu. */
407 sctp_assoc_sync_pmtu(sk, asoc); 407 sctp_assoc_sync_pmtu(asoc);
408 } 408 }
409 409
410 /* Retransmit with the new pmtu setting. 410 /* Retransmit with the new pmtu setting.
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 71ce6b945dcb..1409a875ad8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
86{ 86{
87 struct sctp_transport *tp = packet->transport; 87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc; 88 struct sctp_association *asoc = tp->asoc;
89 struct sock *sk;
89 90
90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 91 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
91
92 packet->vtag = vtag; 92 packet->vtag = vtag;
93 93
94 if (asoc && tp->dst) { 94 /* do the following jobs only once for a flush schedule */
95 struct sock *sk = asoc->base.sk; 95 if (!sctp_packet_empty(packet))
96 96 return;
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105 97
106 packet->max_size = dev->gso_max_size; 98 /* set packet max_size with pathmtu */
107 } else { 99 packet->max_size = tp->pathmtu;
108 packet->max_size = asoc->pathmtu; 100 if (!asoc)
109 } 101 return;
110 rcu_read_unlock();
111 102
112 } else { 103 /* update dst or transport pathmtu if in need */
113 packet->max_size = tp->pathmtu; 104 sk = asoc->base.sk;
105 if (!sctp_transport_dst_check(tp)) {
106 sctp_transport_route(tp, NULL, sctp_sk(sk));
107 if (asoc->param_flags & SPP_PMTUD_ENABLE)
108 sctp_assoc_sync_pmtu(asoc);
109 } else if (!sctp_transport_pmtu_check(tp)) {
110 if (asoc->param_flags & SPP_PMTUD_ENABLE)
111 sctp_assoc_sync_pmtu(asoc);
114 } 112 }
115 113
116 if (ecn_capable && sctp_packet_empty(packet)) { 114 /* If there a is a prepend chunk stick it on the list before
117 struct sctp_chunk *chunk; 115 * any other chunks get appended.
116 */
117 if (ecn_capable) {
118 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
118 119
119 /* If there a is a prepend chunk stick it on the list before
120 * any other chunks get appended.
121 */
122 chunk = sctp_get_ecne_prepend(asoc);
123 if (chunk) 120 if (chunk)
124 sctp_packet_append_chunk(packet, chunk); 121 sctp_packet_append_chunk(packet, chunk);
125 } 122 }
123
124 if (!tp->dst)
125 return;
126
127 /* set packet max_size with gso_max_size if gso is enabled*/
128 rcu_read_lock();
129 if (__sk_dst_get(sk) != tp->dst) {
130 dst_hold(tp->dst);
131 sk_setup_caps(sk, tp->dst);
132 }
133 packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
134 : asoc->pathmtu;
135 rcu_read_unlock();
126} 136}
127 137
128/* Initialize the packet structure. */ 138/* Initialize the packet structure. */
@@ -546,7 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
546 struct sctp_association *asoc = tp->asoc; 556 struct sctp_association *asoc = tp->asoc;
547 struct sctp_chunk *chunk, *tmp; 557 struct sctp_chunk *chunk, *tmp;
548 int pkt_count, gso = 0; 558 int pkt_count, gso = 0;
549 int confirm;
550 struct dst_entry *dst; 559 struct dst_entry *dst;
551 struct sk_buff *head; 560 struct sk_buff *head;
552 struct sctphdr *sh; 561 struct sctphdr *sh;
@@ -583,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
583 sh->vtag = htonl(packet->vtag); 592 sh->vtag = htonl(packet->vtag);
584 sh->checksum = 0; 593 sh->checksum = 0;
585 594
586 /* update dst if in need */ 595 /* drop packet if no dst */
587 if (!sctp_transport_dst_check(tp)) {
588 sctp_transport_route(tp, NULL, sctp_sk(sk));
589 if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
590 sctp_assoc_sync_pmtu(sk, asoc);
591 }
592 dst = dst_clone(tp->dst); 596 dst = dst_clone(tp->dst);
593 if (!dst) { 597 if (!dst) {
594 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 598 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -625,13 +629,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
625 asoc->peer.last_sent_to = tp; 629 asoc->peer.last_sent_to = tp;
626 } 630 }
627 head->ignore_df = packet->ipfragok; 631 head->ignore_df = packet->ipfragok;
628 confirm = tp->dst_pending_confirm; 632 if (tp->dst_pending_confirm)
629 if (confirm)
630 skb_set_dst_pending_confirm(head, 1); 633 skb_set_dst_pending_confirm(head, 1);
631 /* neighbour should be confirmed on successful transmission or 634 /* neighbour should be confirmed on successful transmission or
632 * positive error 635 * positive error
633 */ 636 */
634 if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) 637 if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
638 tp->dst_pending_confirm)
635 tp->dst_pending_confirm = 0; 639 tp->dst_pending_confirm = 0;
636 640
637out: 641out:
@@ -705,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
705 */ 709 */
706 710
707 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && 711 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
708 !chunk->msg->force_delay) 712 !asoc->force_delay)
709 /* Nothing unacked */ 713 /* Nothing unacked */
710 return SCTP_XMIT_OK; 714 return SCTP_XMIT_OK;
711 715
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db352e5d61f8..8081476ed313 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
382} 382}
383 383
384static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, 384static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
385 struct sctp_sndrcvinfo *sinfo, 385 struct sctp_sndrcvinfo *sinfo, int msg_len)
386 struct list_head *queue, int msg_len)
387{ 386{
387 struct sctp_outq *q = &asoc->outqueue;
388 struct sctp_chunk *chk, *temp; 388 struct sctp_chunk *chk, *temp;
389 389
390 list_for_each_entry_safe(chk, temp, queue, list) { 390 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
392 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 392 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
393 continue; 393 continue;
394 394
395 list_del_init(&chk->list); 395 list_del_init(&chk->list);
396 q->out_qlen -= chk->skb->len;
396 asoc->sent_cnt_removable--; 397 asoc->sent_cnt_removable--;
397 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; 398 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
398 399
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
431 return; 432 return;
432 } 433 }
433 434
434 sctp_prsctp_prune_unsent(asoc, sinfo, 435 sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
435 &asoc->outqueue.out_chunk_list,
436 msg_len);
437} 436}
438 437
439/* Mark all the eligible packets on a transport for retransmission. */ 438/* Mark all the eligible packets on a transport for retransmission. */
@@ -1027,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1027 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 1026 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
1028 * stream identifier. 1027 * stream identifier.
1029 */ 1028 */
1030 if (chunk->sinfo.sinfo_stream >= 1029 if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
1031 asoc->c.sinit_num_ostreams) {
1032 1030
1033 /* Mark as failed send. */ 1031 /* Mark as failed send. */
1034 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 1032 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377fe91ec..a0b29d43627f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
361 sctp_seq_dump_remote_addrs(seq, assoc); 361 sctp_seq_dump_remote_addrs(seq, assoc);
362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " 362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
363 "%8d %8d %8d %8d", 363 "%8d %8d %8d %8d",
364 assoc->hbinterval, assoc->c.sinit_max_instreams, 364 assoc->hbinterval, assoc->stream->incnt,
365 assoc->c.sinit_num_ostreams, assoc->max_retrans, 365 assoc->stream->outcnt, assoc->max_retrans,
366 assoc->init_retries, assoc->shutdown_retries, 366 assoc->init_retries, assoc->shutdown_retries,
367 assoc->rtx_data_chunks, 367 assoc->rtx_data_chunks,
368 atomic_read(&sk->sk_wmem_alloc), 368 atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 969a30c7bb54..118faff6a332 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2460 * association. 2460 * association.
2461 */ 2461 */
2462 if (!asoc->temp) { 2462 if (!asoc->temp) {
2463 int error; 2463 if (sctp_stream_init(asoc, gfp))
2464
2465 asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
2466 asoc->c.sinit_num_ostreams, gfp);
2467 if (!asoc->stream)
2468 goto clean_up; 2464 goto clean_up;
2469 2465
2470 error = sctp_assoc_set_id(asoc, gfp); 2466 if (sctp_assoc_set_id(asoc, gfp))
2471 if (error)
2472 goto clean_up; 2467 goto clean_up;
2473 } 2468 }
2474 2469
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..24c6ccce7539 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3946 3946
3947 /* Silently discard the chunk if stream-id is not valid */ 3947 /* Silently discard the chunk if stream-id is not valid */
3948 sctp_walk_fwdtsn(skip, chunk) { 3948 sctp_walk_fwdtsn(skip, chunk) {
3949 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 3949 if (ntohs(skip->stream) >= asoc->stream->incnt)
3950 goto discard_noforce; 3950 goto discard_noforce;
3951 } 3951 }
3952 3952
@@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
4017 4017
4018 /* Silently discard the chunk if stream-id is not valid */ 4018 /* Silently discard the chunk if stream-id is not valid */
4019 sctp_walk_fwdtsn(skip, chunk) { 4019 sctp_walk_fwdtsn(skip, chunk) {
4020 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 4020 if (ntohs(skip->stream) >= asoc->stream->incnt)
4021 goto gen_shutdown; 4021 goto gen_shutdown;
4022 } 4022 }
4023 4023
@@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6353 * and discard the DATA chunk. 6353 * and discard the DATA chunk.
6354 */ 6354 */
6355 sid = ntohs(data_hdr->stream); 6355 sid = ntohs(data_hdr->stream);
6356 if (sid >= asoc->c.sinit_max_instreams) { 6356 if (sid >= asoc->stream->incnt) {
6357 /* Mark tsn as received even though we drop it */ 6357 /* Mark tsn as received even though we drop it */
6358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 6358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
6359 6359
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f378ea2ae38..d9d4c92e06b3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1907 } 1907 }
1908 1908
1909 if (asoc->pmtu_pending) 1909 if (asoc->pmtu_pending)
1910 sctp_assoc_pending_pmtu(sk, asoc); 1910 sctp_assoc_pending_pmtu(asoc);
1911 1911
1912 /* If fragmentation is disabled and the message length exceeds the 1912 /* If fragmentation is disabled and the message length exceeds the
1913 * association fragmentation point, return EMSGSIZE. The I-D 1913 * association fragmentation point, return EMSGSIZE. The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1920 } 1920 }
1921 1921
1922 /* Check for invalid stream. */ 1922 /* Check for invalid stream. */
1923 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1923 if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
1924 err = -EINVAL; 1924 err = -EINVAL;
1925 goto out_free; 1925 goto out_free;
1926 } 1926 }
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1965 err = PTR_ERR(datamsg); 1965 err = PTR_ERR(datamsg);
1966 goto out_free; 1966 goto out_free;
1967 } 1967 }
1968 datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); 1968 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
1969 1969
1970 /* Now send the (possibly) fragmented message. */ 1970 /* Now send the (possibly) fragmented message. */
1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2436 if (trans) { 2436 if (trans) {
2437 trans->pathmtu = params->spp_pathmtu; 2437 trans->pathmtu = params->spp_pathmtu;
2438 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2438 sctp_assoc_sync_pmtu(asoc);
2439 } else if (asoc) { 2439 } else if (asoc) {
2440 asoc->pathmtu = params->spp_pathmtu; 2440 asoc->pathmtu = params->spp_pathmtu;
2441 } else { 2441 } else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2452 if (update) { 2452 if (update) {
2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2454 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2454 sctp_assoc_sync_pmtu(asoc);
2455 } 2455 }
2456 } else if (asoc) { 2456 } else if (asoc) {
2457 asoc->param_flags = 2457 asoc->param_flags =
@@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4461 info->sctpi_rwnd = asoc->a_rwnd; 4461 info->sctpi_rwnd = asoc->a_rwnd;
4462 info->sctpi_unackdata = asoc->unack_data; 4462 info->sctpi_unackdata = asoc->unack_data;
4463 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4463 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4464 info->sctpi_instrms = asoc->c.sinit_max_instreams; 4464 info->sctpi_instrms = asoc->stream->incnt;
4465 info->sctpi_outstrms = asoc->c.sinit_num_ostreams; 4465 info->sctpi_outstrms = asoc->stream->outcnt;
4466 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4466 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
4467 info->sctpi_inqueue++; 4467 info->sctpi_inqueue++;
4468 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4468 list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4691 status.sstat_unackdata = asoc->unack_data; 4691 status.sstat_unackdata = asoc->unack_data;
4692 4692
4693 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4693 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4694 status.sstat_instrms = asoc->c.sinit_max_instreams; 4694 status.sstat_instrms = asoc->stream->incnt;
4695 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4695 status.sstat_outstrms = asoc->stream->outcnt;
4696 status.sstat_fragmentation_point = asoc->frag_point; 4696 status.sstat_fragmentation_point = asoc->frag_point;
4697 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4697 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4698 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4698 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
7034 if (sock->state != SS_UNCONNECTED) 7034 if (sock->state != SS_UNCONNECTED)
7035 goto out; 7035 goto out;
7036 7036
7037 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
7038 goto out;
7039
7037 /* If backlog is zero, disable listening. */ 7040 /* If backlog is zero, disable listening. */
7038 if (!backlog) { 7041 if (!backlog) {
7039 if (sctp_sstate(sk, CLOSED)) 7042 if (sctp_sstate(sk, CLOSED))
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..bbed997e1c5f 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,33 +35,60 @@
35#include <net/sctp/sctp.h> 35#include <net/sctp/sctp.h>
36#include <net/sctp/sm.h> 36#include <net/sctp/sm.h>
37 37
38struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) 38int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
39{ 39{
40 struct sctp_stream *stream; 40 struct sctp_stream *stream;
41 int i; 41 int i;
42 42
43 stream = kzalloc(sizeof(*stream), gfp); 43 stream = kzalloc(sizeof(*stream), gfp);
44 if (!stream) 44 if (!stream)
45 return NULL; 45 return -ENOMEM;
46 46
47 stream->outcnt = outcnt; 47 stream->outcnt = asoc->c.sinit_num_ostreams;
48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); 48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
49 if (!stream->out) { 49 if (!stream->out) {
50 kfree(stream); 50 kfree(stream);
51 return NULL; 51 return -ENOMEM;
52 } 52 }
53 for (i = 0; i < stream->outcnt; i++) 53 for (i = 0; i < stream->outcnt; i++)
54 stream->out[i].state = SCTP_STREAM_OPEN; 54 stream->out[i].state = SCTP_STREAM_OPEN;
55 55
56 stream->incnt = incnt; 56 asoc->stream = stream;
57
58 return 0;
59}
60
61int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
62{
63 struct sctp_stream *stream = asoc->stream;
64 int i;
65
66 /* Initial stream->out size may be very big, so free it and alloc
67 * a new one with new outcnt to save memory.
68 */
69 kfree(stream->out);
70 stream->outcnt = asoc->c.sinit_num_ostreams;
71 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
72 if (!stream->out)
73 goto nomem;
74
75 for (i = 0; i < stream->outcnt; i++)
76 stream->out[i].state = SCTP_STREAM_OPEN;
77
78 stream->incnt = asoc->c.sinit_max_instreams;
57 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); 79 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
58 if (!stream->in) { 80 if (!stream->in) {
59 kfree(stream->out); 81 kfree(stream->out);
60 kfree(stream); 82 goto nomem;
61 return NULL;
62 } 83 }
63 84
64 return stream; 85 return 0;
86
87nomem:
88 asoc->stream = NULL;
89 kfree(stream);
90
91 return -ENOMEM;
65} 92}
66 93
67void sctp_stream_free(struct sctp_stream *stream) 94void sctp_stream_free(struct sctp_stream *stream)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3379668af368..721eeebfcd8a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
252} 252}
253 253
254void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) 254void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
255{ 255{
256 struct dst_entry *dst; 256 struct dst_entry *dst = sctp_transport_dst_check(t);
257 257
258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
260 __func__, pmtu, 260 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
261 SCTP_DEFAULT_MINSEGMENT);
262 /* Use default minimum segment size and disable 261 /* Use default minimum segment size and disable
263 * pmtu discovery on this transport. 262 * pmtu discovery on this transport.
264 */ 263 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
267 t->pathmtu = pmtu; 266 t->pathmtu = pmtu;
268 } 267 }
269 268
270 dst = sctp_transport_dst_check(t);
271 if (!dst)
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
273
274 if (dst) { 269 if (dst) {
275 dst->ops->update_pmtu(dst, sk, NULL, pmtu); 270 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
276
277 dst = sctp_transport_dst_check(t); 271 dst = sctp_transport_dst_check(t);
278 if (!dst)
279 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
280 } 272 }
273
274 if (!dst)
275 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
281} 276}
282 277
283/* Caches the dst entry and source address for a transport's destination 278/* Caches the dst entry and source address for a transport's destination
diff --git a/net/socket.c b/net/socket.c
index e034fe4164be..985ef06792d6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
652} 652}
653EXPORT_SYMBOL(kernel_sendmsg); 653EXPORT_SYMBOL(kernel_sendmsg);
654 654
655static bool skb_is_err_queue(const struct sk_buff *skb)
656{
657 /* pkt_type of skbs enqueued on the error queue are set to
658 * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
659 * in recvmsg, since skbs received on a local socket will never
660 * have a pkt_type of PACKET_OUTGOING.
661 */
662 return skb->pkt_type == PACKET_OUTGOING;
663}
664
655/* 665/*
656 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) 666 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
657 */ 667 */
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
695 put_cmsg(msg, SOL_SOCKET, 705 put_cmsg(msg, SOL_SOCKET,
696 SCM_TIMESTAMPING, sizeof(tss), &tss); 706 SCM_TIMESTAMPING, sizeof(tss), &tss);
697 707
698 if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) 708 if (skb_is_err_queue(skb) && skb->len &&
709 SKB_EXT_ERR(skb)->opt_stats)
699 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, 710 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
700 skb->len, skb->data); 711 skb->len, skb->data);
701 } 712 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8931e33b6541..2b720fa35c4f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
1635 1635
1636 xprt = &svsk->sk_xprt; 1636 xprt = &svsk->sk_xprt;
1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); 1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
1638 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1638 1639
1639 serv->sv_bc_xprt = xprt; 1640 serv->sv_bc_xprt = xprt;
1640 1641
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c13a5c35ce14..fc8f14c7bfec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
127 xprt = &cma_xprt->sc_xprt; 127 xprt = &cma_xprt->sc_xprt;
128 128
129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); 129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
130 set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
130 serv->sv_bc_xprt = xprt; 131 serv->sv_bc_xprt = xprt;
131 132
132 dprintk("svcrdma: %s(%p)\n", __func__, xprt); 133 dprintk("svcrdma: %s(%p)\n", __func__, xprt);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65d0894..271cd66e4b3b 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
141static void tipc_subscrp_timeout(unsigned long data) 141static void tipc_subscrp_timeout(unsigned long data)
142{ 142{
143 struct tipc_subscription *sub = (struct tipc_subscription *)data; 143 struct tipc_subscription *sub = (struct tipc_subscription *)data;
144 struct tipc_subscriber *subscriber = sub->subscriber;
145
146 spin_lock_bh(&subscriber->lock);
147 tipc_nametbl_unsubscribe(sub);
148 spin_unlock_bh(&subscriber->lock);
144 149
145 /* Notify subscriber of timeout */ 150 /* Notify subscriber of timeout */
146 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 151 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
173 struct tipc_subscriber *subscriber = sub->subscriber; 178 struct tipc_subscriber *subscriber = sub->subscriber;
174 179
175 spin_lock_bh(&subscriber->lock); 180 spin_lock_bh(&subscriber->lock);
176 tipc_nametbl_unsubscribe(sub);
177 list_del(&sub->subscrp_list); 181 list_del(&sub->subscrp_list);
178 atomic_dec(&tn->subscription_count); 182 atomic_dec(&tn->subscription_count);
179 spin_unlock_bh(&subscriber->lock); 183 spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
205 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) 209 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
206 continue; 210 continue;
207 211
212 tipc_nametbl_unsubscribe(sub);
208 tipc_subscrp_get(sub); 213 tipc_subscrp_get(sub);
209 spin_unlock_bh(&subscriber->lock); 214 spin_unlock_bh(&subscriber->lock);
210 tipc_subscrp_delete(sub); 215 tipc_subscrp_delete(sub);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d48525fcf..c36757e72844 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
146 if (s) { 146 if (s) {
147 struct unix_sock *u = unix_sk(s); 147 struct unix_sock *u = unix_sk(s);
148 148
149 BUG_ON(!atomic_long_read(&u->inflight));
149 BUG_ON(list_empty(&u->link)); 150 BUG_ON(list_empty(&u->link));
150 151
151 if (atomic_long_dec_and_test(&u->inflight)) 152 if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
341 } 342 }
342 list_del(&cursor); 343 list_del(&cursor);
343 344
345 /* Now gc_candidates contains only garbage. Restore original
346 * inflight counters for these as well, and remove the skbuffs
347 * which are creating the cycle(s).
348 */
349 skb_queue_head_init(&hitlist);
350 list_for_each_entry(u, &gc_candidates, link)
351 scan_children(&u->sk, inc_inflight, &hitlist);
352
344 /* not_cycle_list contains those sockets which do not make up a 353 /* not_cycle_list contains those sockets which do not make up a
345 * cycle. Restore these to the inflight list. 354 * cycle. Restore these to the inflight list.
346 */ 355 */
@@ -350,14 +359,6 @@ void unix_gc(void)
350 list_move_tail(&u->link, &gc_inflight_list); 359 list_move_tail(&u->link, &gc_inflight_list);
351 } 360 }
352 361
353 /* Now gc_candidates contains only garbage. Restore original
354 * inflight counters for these as well, and remove the skbuffs
355 * which are creating the cycle(s).
356 */
357 skb_queue_head_init(&hitlist);
358 list_for_each_entry(u, &gc_candidates, link)
359 scan_children(&u->sk, inc_inflight, &hitlist);
360
361 spin_unlock(&unix_gc_lock); 362 spin_unlock(&unix_gc_lock);
362 363
363 /* Here we are. Hitlist is filled. Die. */ 364 /* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9f770f33c100..6f7f6757ceef 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
1102 .sendpage = sock_no_sendpage, 1102 .sendpage = sock_no_sendpage,
1103}; 1103};
1104 1104
1105static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1106{
1107 if (!transport->cancel_pkt)
1108 return -EOPNOTSUPP;
1109
1110 return transport->cancel_pkt(vsk);
1111}
1112
1105static void vsock_connect_timeout(struct work_struct *work) 1113static void vsock_connect_timeout(struct work_struct *work)
1106{ 1114{
1107 struct sock *sk; 1115 struct sock *sk;
1108 struct vsock_sock *vsk; 1116 struct vsock_sock *vsk;
1117 int cancel = 0;
1109 1118
1110 vsk = container_of(work, struct vsock_sock, dwork.work); 1119 vsk = container_of(work, struct vsock_sock, dwork.work);
1111 sk = sk_vsock(vsk); 1120 sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
1116 sk->sk_state = SS_UNCONNECTED; 1125 sk->sk_state = SS_UNCONNECTED;
1117 sk->sk_err = ETIMEDOUT; 1126 sk->sk_err = ETIMEDOUT;
1118 sk->sk_error_report(sk); 1127 sk->sk_error_report(sk);
1128 cancel = 1;
1119 } 1129 }
1120 release_sock(sk); 1130 release_sock(sk);
1131 if (cancel)
1132 vsock_transport_cancel_pkt(vsk);
1121 1133
1122 sock_put(sk); 1134 sock_put(sk);
1123} 1135}
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1224 err = sock_intr_errno(timeout); 1236 err = sock_intr_errno(timeout);
1225 sk->sk_state = SS_UNCONNECTED; 1237 sk->sk_state = SS_UNCONNECTED;
1226 sock->state = SS_UNCONNECTED; 1238 sock->state = SS_UNCONNECTED;
1239 vsock_transport_cancel_pkt(vsk);
1227 goto out_wait; 1240 goto out_wait;
1228 } else if (timeout == 0) { 1241 } else if (timeout == 0) {
1229 err = -ETIMEDOUT; 1242 err = -ETIMEDOUT;
1230 sk->sk_state = SS_UNCONNECTED; 1243 sk->sk_state = SS_UNCONNECTED;
1231 sock->state = SS_UNCONNECTED; 1244 sock->state = SS_UNCONNECTED;
1245 vsock_transport_cancel_pkt(vsk);
1232 goto out_wait; 1246 goto out_wait;
1233 } 1247 }
1234 1248
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9d24c0e958b1..68675a151f22 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
213 return len; 213 return len;
214} 214}
215 215
216static int
217virtio_transport_cancel_pkt(struct vsock_sock *vsk)
218{
219 struct virtio_vsock *vsock;
220 struct virtio_vsock_pkt *pkt, *n;
221 int cnt = 0;
222 LIST_HEAD(freeme);
223
224 vsock = virtio_vsock_get();
225 if (!vsock) {
226 return -ENODEV;
227 }
228
229 spin_lock_bh(&vsock->send_pkt_list_lock);
230 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
231 if (pkt->vsk != vsk)
232 continue;
233 list_move(&pkt->list, &freeme);
234 }
235 spin_unlock_bh(&vsock->send_pkt_list_lock);
236
237 list_for_each_entry_safe(pkt, n, &freeme, list) {
238 if (pkt->reply)
239 cnt++;
240 list_del(&pkt->list);
241 virtio_transport_free_pkt(pkt);
242 }
243
244 if (cnt) {
245 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
246 int new_cnt;
247
248 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
249 if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
250 new_cnt < virtqueue_get_vring_size(rx_vq))
251 queue_work(virtio_vsock_workqueue, &vsock->rx_work);
252 }
253
254 return 0;
255}
256
216static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) 257static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
217{ 258{
218 int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; 259 int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
462 .release = virtio_transport_release, 503 .release = virtio_transport_release,
463 .connect = virtio_transport_connect, 504 .connect = virtio_transport_connect,
464 .shutdown = virtio_transport_shutdown, 505 .shutdown = virtio_transport_shutdown,
506 .cancel_pkt = virtio_transport_cancel_pkt,
465 507
466 .dgram_bind = virtio_transport_dgram_bind, 508 .dgram_bind = virtio_transport_dgram_bind,
467 .dgram_dequeue = virtio_transport_dgram_dequeue, 509 .dgram_dequeue = virtio_transport_dgram_dequeue,
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8d592a45b597..af087b44ceea 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
58 pkt->len = len; 58 pkt->len = len;
59 pkt->hdr.len = cpu_to_le32(len); 59 pkt->hdr.len = cpu_to_le32(len);
60 pkt->reply = info->reply; 60 pkt->reply = info->reply;
61 pkt->vsk = info->vsk;
61 62
62 if (info->msg && len > 0) { 63 if (info->msg && len > 0) {
63 pkt->buf = kmalloc(len, GFP_KERNEL); 64 pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
180 struct virtio_vsock_pkt_info info = { 181 struct virtio_vsock_pkt_info info = {
181 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, 182 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
182 .type = type, 183 .type = type,
184 .vsk = vsk,
183 }; 185 };
184 186
185 return virtio_transport_send_pkt_info(vsk, &info); 187 return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
519 struct virtio_vsock_pkt_info info = { 521 struct virtio_vsock_pkt_info info = {
520 .op = VIRTIO_VSOCK_OP_REQUEST, 522 .op = VIRTIO_VSOCK_OP_REQUEST,
521 .type = VIRTIO_VSOCK_TYPE_STREAM, 523 .type = VIRTIO_VSOCK_TYPE_STREAM,
524 .vsk = vsk,
522 }; 525 };
523 526
524 return virtio_transport_send_pkt_info(vsk, &info); 527 return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
534 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | 537 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
535 (mode & SEND_SHUTDOWN ? 538 (mode & SEND_SHUTDOWN ?
536 VIRTIO_VSOCK_SHUTDOWN_SEND : 0), 539 VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
540 .vsk = vsk,
537 }; 541 };
538 542
539 return virtio_transport_send_pkt_info(vsk, &info); 543 return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
560 .type = VIRTIO_VSOCK_TYPE_STREAM, 564 .type = VIRTIO_VSOCK_TYPE_STREAM,
561 .msg = msg, 565 .msg = msg,
562 .pkt_len = len, 566 .pkt_len = len,
567 .vsk = vsk,
563 }; 568 };
564 569
565 return virtio_transport_send_pkt_info(vsk, &info); 570 return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
581 .op = VIRTIO_VSOCK_OP_RST, 586 .op = VIRTIO_VSOCK_OP_RST,
582 .type = VIRTIO_VSOCK_TYPE_STREAM, 587 .type = VIRTIO_VSOCK_TYPE_STREAM,
583 .reply = !!pkt, 588 .reply = !!pkt,
589 .vsk = vsk,
584 }; 590 };
585 591
586 /* Send RST only if the original pkt is not a RST pkt */ 592 /* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
826 .remote_cid = le64_to_cpu(pkt->hdr.src_cid), 832 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
827 .remote_port = le32_to_cpu(pkt->hdr.src_port), 833 .remote_port = le32_to_cpu(pkt->hdr.src_port),
828 .reply = true, 834 .reply = true,
835 .vsk = vsk,
829 }; 836 };
830 837
831 return virtio_transport_send_pkt_info(vsk, &info); 838 return virtio_transport_send_pkt_info(vsk, &info);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7f8be4e321a..2312dc2ffdb9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
545{ 545{
546 int err; 546 int err;
547 547
548 rtnl_lock();
549
550 if (!cb->args[0]) { 548 if (!cb->args[0]) {
551 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 549 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
552 genl_family_attrbuf(&nl80211_fam), 550 genl_family_attrbuf(&nl80211_fam),
553 nl80211_fam.maxattr, nl80211_policy); 551 nl80211_fam.maxattr, nl80211_policy);
554 if (err) 552 if (err)
555 goto out_unlock; 553 return err;
556 554
557 *wdev = __cfg80211_wdev_from_attrs( 555 *wdev = __cfg80211_wdev_from_attrs(
558 sock_net(skb->sk), 556 sock_net(skb->sk),
559 genl_family_attrbuf(&nl80211_fam)); 557 genl_family_attrbuf(&nl80211_fam));
560 if (IS_ERR(*wdev)) { 558 if (IS_ERR(*wdev))
561 err = PTR_ERR(*wdev); 559 return PTR_ERR(*wdev);
562 goto out_unlock;
563 }
564 *rdev = wiphy_to_rdev((*wdev)->wiphy); 560 *rdev = wiphy_to_rdev((*wdev)->wiphy);
565 /* 0 is the first index - add 1 to parse only once */ 561 /* 0 is the first index - add 1 to parse only once */
566 cb->args[0] = (*rdev)->wiphy_idx + 1; 562 cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
570 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 566 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
571 struct wireless_dev *tmp; 567 struct wireless_dev *tmp;
572 568
573 if (!wiphy) { 569 if (!wiphy)
574 err = -ENODEV; 570 return -ENODEV;
575 goto out_unlock;
576 }
577 *rdev = wiphy_to_rdev(wiphy); 571 *rdev = wiphy_to_rdev(wiphy);
578 *wdev = NULL; 572 *wdev = NULL;
579 573
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
584 } 578 }
585 } 579 }
586 580
587 if (!*wdev) { 581 if (!*wdev)
588 err = -ENODEV; 582 return -ENODEV;
589 goto out_unlock;
590 }
591 } 583 }
592 584
593 return 0; 585 return 0;
594 out_unlock:
595 rtnl_unlock();
596 return err;
597}
598
599static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
600{
601 rtnl_unlock();
602} 586}
603 587
604/* IE validation */ 588/* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
2608 int filter_wiphy = -1; 2592 int filter_wiphy = -1;
2609 struct cfg80211_registered_device *rdev; 2593 struct cfg80211_registered_device *rdev;
2610 struct wireless_dev *wdev; 2594 struct wireless_dev *wdev;
2595 int ret;
2611 2596
2612 rtnl_lock(); 2597 rtnl_lock();
2613 if (!cb->args[2]) { 2598 if (!cb->args[2]) {
2614 struct nl80211_dump_wiphy_state state = { 2599 struct nl80211_dump_wiphy_state state = {
2615 .filter_wiphy = -1, 2600 .filter_wiphy = -1,
2616 }; 2601 };
2617 int ret;
2618 2602
2619 ret = nl80211_dump_wiphy_parse(skb, cb, &state); 2603 ret = nl80211_dump_wiphy_parse(skb, cb, &state);
2620 if (ret) 2604 if (ret)
2621 return ret; 2605 goto out_unlock;
2622 2606
2623 filter_wiphy = state.filter_wiphy; 2607 filter_wiphy = state.filter_wiphy;
2624 2608
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
2663 wp_idx++; 2647 wp_idx++;
2664 } 2648 }
2665 out: 2649 out:
2666 rtnl_unlock();
2667
2668 cb->args[0] = wp_idx; 2650 cb->args[0] = wp_idx;
2669 cb->args[1] = if_idx; 2651 cb->args[1] = if_idx;
2670 2652
2671 return skb->len; 2653 ret = skb->len;
2654 out_unlock:
2655 rtnl_unlock();
2656
2657 return ret;
2672} 2658}
2673 2659
2674static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) 2660static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
4452 int sta_idx = cb->args[2]; 4438 int sta_idx = cb->args[2];
4453 int err; 4439 int err;
4454 4440
4441 rtnl_lock();
4455 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 4442 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
4456 if (err) 4443 if (err)
4457 return err; 4444 goto out_err;
4458 4445
4459 if (!wdev->netdev) { 4446 if (!wdev->netdev) {
4460 err = -EINVAL; 4447 err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
4489 cb->args[2] = sta_idx; 4476 cb->args[2] = sta_idx;
4490 err = skb->len; 4477 err = skb->len;
4491 out_err: 4478 out_err:
4492 nl80211_finish_wdev_dump(rdev); 4479 rtnl_unlock();
4493 4480
4494 return err; 4481 return err;
4495} 4482}
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
5275 int path_idx = cb->args[2]; 5262 int path_idx = cb->args[2];
5276 int err; 5263 int err;
5277 5264
5265 rtnl_lock();
5278 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5266 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
5279 if (err) 5267 if (err)
5280 return err; 5268 goto out_err;
5281 5269
5282 if (!rdev->ops->dump_mpath) { 5270 if (!rdev->ops->dump_mpath) {
5283 err = -EOPNOTSUPP; 5271 err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
5310 cb->args[2] = path_idx; 5298 cb->args[2] = path_idx;
5311 err = skb->len; 5299 err = skb->len;
5312 out_err: 5300 out_err:
5313 nl80211_finish_wdev_dump(rdev); 5301 rtnl_unlock();
5314 return err; 5302 return err;
5315} 5303}
5316 5304
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
5470 int path_idx = cb->args[2]; 5458 int path_idx = cb->args[2];
5471 int err; 5459 int err;
5472 5460
5461 rtnl_lock();
5473 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5462 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
5474 if (err) 5463 if (err)
5475 return err; 5464 goto out_err;
5476 5465
5477 if (!rdev->ops->dump_mpp) { 5466 if (!rdev->ops->dump_mpp) {
5478 err = -EOPNOTSUPP; 5467 err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
5505 cb->args[2] = path_idx; 5494 cb->args[2] = path_idx;
5506 err = skb->len; 5495 err = skb->len;
5507 out_err: 5496 out_err:
5508 nl80211_finish_wdev_dump(rdev); 5497 rtnl_unlock();
5509 return err; 5498 return err;
5510} 5499}
5511 5500
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
7674 int start = cb->args[2], idx = 0; 7663 int start = cb->args[2], idx = 0;
7675 int err; 7664 int err;
7676 7665
7666 rtnl_lock();
7677 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7667 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
7678 if (err) 7668 if (err) {
7669 rtnl_unlock();
7679 return err; 7670 return err;
7671 }
7680 7672
7681 wdev_lock(wdev); 7673 wdev_lock(wdev);
7682 spin_lock_bh(&rdev->bss_lock); 7674 spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
7699 wdev_unlock(wdev); 7691 wdev_unlock(wdev);
7700 7692
7701 cb->args[2] = idx; 7693 cb->args[2] = idx;
7702 nl80211_finish_wdev_dump(rdev); 7694 rtnl_unlock();
7703 7695
7704 return skb->len; 7696 return skb->len;
7705} 7697}
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
7784 int res; 7776 int res;
7785 bool radio_stats; 7777 bool radio_stats;
7786 7778
7779 rtnl_lock();
7787 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7780 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
7788 if (res) 7781 if (res)
7789 return res; 7782 goto out_err;
7790 7783
7791 /* prepare_wdev_dump parsed the attributes */ 7784 /* prepare_wdev_dump parsed the attributes */
7792 radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; 7785 radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
7827 cb->args[2] = survey_idx; 7820 cb->args[2] = survey_idx;
7828 res = skb->len; 7821 res = skb->len;
7829 out_err: 7822 out_err:
7830 nl80211_finish_wdev_dump(rdev); 7823 rtnl_unlock();
7831 return res; 7824 return res;
7832} 7825}
7833 7826
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11508 void *data = NULL; 11501 void *data = NULL;
11509 unsigned int data_len = 0; 11502 unsigned int data_len = 0;
11510 11503
11511 rtnl_lock();
11512
11513 if (cb->args[0]) { 11504 if (cb->args[0]) {
11514 /* subtract the 1 again here */ 11505 /* subtract the 1 again here */
11515 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 11506 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
11516 struct wireless_dev *tmp; 11507 struct wireless_dev *tmp;
11517 11508
11518 if (!wiphy) { 11509 if (!wiphy)
11519 err = -ENODEV; 11510 return -ENODEV;
11520 goto out_unlock;
11521 }
11522 *rdev = wiphy_to_rdev(wiphy); 11511 *rdev = wiphy_to_rdev(wiphy);
11523 *wdev = NULL; 11512 *wdev = NULL;
11524 11513
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11538 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 11527 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
11539 attrbuf, nl80211_fam.maxattr, nl80211_policy); 11528 attrbuf, nl80211_fam.maxattr, nl80211_policy);
11540 if (err) 11529 if (err)
11541 goto out_unlock; 11530 return err;
11542 11531
11543 if (!attrbuf[NL80211_ATTR_VENDOR_ID] || 11532 if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
11544 !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { 11533 !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
11545 err = -EINVAL; 11534 return -EINVAL;
11546 goto out_unlock;
11547 }
11548 11535
11549 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); 11536 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
11550 if (IS_ERR(*wdev)) 11537 if (IS_ERR(*wdev))
11551 *wdev = NULL; 11538 *wdev = NULL;
11552 11539
11553 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); 11540 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
11554 if (IS_ERR(*rdev)) { 11541 if (IS_ERR(*rdev))
11555 err = PTR_ERR(*rdev); 11542 return PTR_ERR(*rdev);
11556 goto out_unlock;
11557 }
11558 11543
11559 vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); 11544 vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
11560 subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); 11545 subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11567 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) 11552 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
11568 continue; 11553 continue;
11569 11554
11570 if (!vcmd->dumpit) { 11555 if (!vcmd->dumpit)
11571 err = -EOPNOTSUPP; 11556 return -EOPNOTSUPP;
11572 goto out_unlock;
11573 }
11574 11557
11575 vcmd_idx = i; 11558 vcmd_idx = i;
11576 break; 11559 break;
11577 } 11560 }
11578 11561
11579 if (vcmd_idx < 0) { 11562 if (vcmd_idx < 0)
11580 err = -EOPNOTSUPP; 11563 return -EOPNOTSUPP;
11581 goto out_unlock;
11582 }
11583 11564
11584 if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { 11565 if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
11585 data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); 11566 data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11596 11577
11597 /* keep rtnl locked in successful case */ 11578 /* keep rtnl locked in successful case */
11598 return 0; 11579 return 0;
11599 out_unlock:
11600 rtnl_unlock();
11601 return err;
11602} 11580}
11603 11581
11604static int nl80211_vendor_cmd_dump(struct sk_buff *skb, 11582static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
11613 int err; 11591 int err;
11614 struct nlattr *vendor_data; 11592 struct nlattr *vendor_data;
11615 11593
11594 rtnl_lock();
11616 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); 11595 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
11617 if (err) 11596 if (err)
11618 return err; 11597 goto out;
11619 11598
11620 vcmd_idx = cb->args[2]; 11599 vcmd_idx = cb->args[2];
11621 data = (void *)cb->args[3]; 11600 data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
11624 11603
11625 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | 11604 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
11626 WIPHY_VENDOR_CMD_NEED_NETDEV)) { 11605 WIPHY_VENDOR_CMD_NEED_NETDEV)) {
11627 if (!wdev) 11606 if (!wdev) {
11628 return -EINVAL; 11607 err = -EINVAL;
11608 goto out;
11609 }
11629 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && 11610 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
11630 !wdev->netdev) 11611 !wdev->netdev) {
11631 return -EINVAL; 11612 err = -EINVAL;
11613 goto out;
11614 }
11632 11615
11633 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { 11616 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
11634 if (!wdev_running(wdev)) 11617 if (!wdev_running(wdev)) {
11635 return -ENETDOWN; 11618 err = -ENETDOWN;
11619 goto out;
11620 }
11636 } 11621 }
11637 } 11622 }
11638 11623
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 16b6b5988be9..570a2b67ca10 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
132 /* Age scan results with time spent in suspend */ 132 /* Age scan results with time spent in suspend */
133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
134 134
135 if (rdev->ops->resume) { 135 rtnl_lock();
136 rtnl_lock(); 136 if (rdev->wiphy.registered && rdev->ops->resume)
137 if (rdev->wiphy.registered) 137 ret = rdev_resume(rdev);
138 ret = rdev_resume(rdev); 138 rtnl_unlock();
139 rtnl_unlock();
140 }
141 139
142 return ret; 140 return ret;
143} 141}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 9705c279494b..40a8aa39220d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
412 up = nla_data(rp); 412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up); 413 ulen = xfrm_replay_state_esn_len(up);
414 414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 /* Check the overall length and the internal bitmap length to avoid
416 * potential overflow. */
417 if (nla_len(rp) < ulen ||
418 xfrm_replay_state_esn_len(replay_esn) != ulen ||
419 replay_esn->bmp_len != up->bmp_len)
420 return -EINVAL;
421
422 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
416 return -EINVAL; 423 return -EINVAL;
417 424
418 return 0; 425 return 0;
diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c
index 8571d766331d..d4d77b09412c 100644
--- a/samples/statx/test-statx.c
+++ b/samples/statx/test-statx.c
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
141 if (stx->stx_mask & STATX_BTIME) 141 if (stx->stx_mask & STATX_BTIME)
142 print_time(" Birth: ", &stx->stx_btime); 142 print_time(" Birth: ", &stx->stx_btime);
143 143
144 if (stx->stx_attributes) { 144 if (stx->stx_attributes_mask) {
145 unsigned char bits; 145 unsigned char bits, mbits;
146 int loop, byte; 146 int loop, byte;
147 147
148 static char attr_representation[64 + 1] = 148 static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
160 printf("Attributes: %016llx (", stx->stx_attributes); 160 printf("Attributes: %016llx (", stx->stx_attributes);
161 for (byte = 64 - 8; byte >= 0; byte -= 8) { 161 for (byte = 64 - 8; byte >= 0; byte -= 8) {
162 bits = stx->stx_attributes >> byte; 162 bits = stx->stx_attributes >> byte;
163 mbits = stx->stx_attributes_mask >> byte;
163 for (loop = 7; loop >= 0; loop--) { 164 for (loop = 7; loop >= 0; loop--) {
164 int bit = byte + loop; 165 int bit = byte + loop;
165 166
166 if (bits & 0x80) 167 if (!(mbits & 0x80))
168 putchar('.'); /* Not supported */
169 else if (bits & 0x80)
167 putchar(attr_representation[63 - bit]); 170 putchar(attr_representation[63 - bit]);
168 else 171 else
169 putchar('-'); 172 putchar('-'); /* Not set */
170 bits <<= 1; 173 bits <<= 1;
174 mbits <<= 1;
171 } 175 }
172 if (byte) 176 if (byte)
173 putchar(' '); 177 putchar(' ');
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d6ca649cb0e9..afe3fd3af1e4 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
150 150
151# cc-if-fullversion
152# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
153cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
154
151# cc-ldoption 155# cc-ldoption
152# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 156# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
153cc-ldoption = $(call try-run,\ 157cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..7234e61e7ce3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -155,7 +155,7 @@ else
155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files 155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
156# and locates generated .h files 156# and locates generated .h files
157# FIXME: Replace both with specific CFLAGS* statements in the makefiles 157# FIXME: Replace both with specific CFLAGS* statements in the makefiles
158__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ 158__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
159 $(call flags,_c_flags) 159 $(call flags,_c_flags)
160__a_flags = $(call flags,_a_flags) 160__a_flags = $(call flags,_a_flags)
161__cpp_flags = $(call flags,_cpp_flags) 161__cpp_flags = $(call flags,_cpp_flags)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 26d208b435a0..cfddddb9c9d7 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
914 current = menu; 914 current = menu;
915 display_tree_part(); 915 display_tree_part();
916 gtk_widget_set_sensitive(back_btn, TRUE); 916 gtk_widget_set_sensitive(back_btn, TRUE);
917 } else if ((col == COL_OPTION)) { 917 } else if (col == COL_OPTION) {
918 toggle_sym_value(menu); 918 toggle_sym_value(menu);
919 gtk_tree_view_expand_row(view, path, TRUE); 919 gtk_tree_view_expand_row(view, path, TRUE);
920 } 920 }
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060399e0..9cb4fe4478a1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
46 * immediately unlinked. 46 * immediately unlinked.
47 */ 47 */
48struct key_type key_type_dead = { 48struct key_type key_type_dead = {
49 .name = "dead", 49 .name = ".dead",
50}; 50};
51 51
52/* 52/*
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 52c34532c785..4ad3212adebe 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -273,7 +273,8 @@ error:
273 * Create and join an anonymous session keyring or join a named session 273 * Create and join an anonymous session keyring or join a named session
274 * keyring, creating it if necessary. A named session keyring must have Search 274 * keyring, creating it if necessary. A named session keyring must have Search
275 * permission for it to be joined. Session keyrings without this permit will 275 * permission for it to be joined. Session keyrings without this permit will
276 * be skipped over. 276 * be skipped over. It is not permitted for userspace to create or join
277 * keyrings whose name begin with a dot.
277 * 278 *
278 * If successful, the ID of the joined session keyring will be returned. 279 * If successful, the ID of the joined session keyring will be returned.
279 */ 280 */
@@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
290 ret = PTR_ERR(name); 291 ret = PTR_ERR(name);
291 goto error; 292 goto error;
292 } 293 }
294
295 ret = -EPERM;
296 if (name[0] == '.')
297 goto error_name;
293 } 298 }
294 299
295 /* join the session */ 300 /* join the session */
296 ret = join_session_keyring(name); 301 ret = join_session_keyring(name);
302error_name:
297 kfree(name); 303 kfree(name);
298
299error: 304error:
300 return ret; 305 return ret;
301} 306}
@@ -1253,8 +1258,8 @@ error:
1253 * Read or set the default keyring in which request_key() will cache keys and 1258 * Read or set the default keyring in which request_key() will cache keys and
1254 * return the old setting. 1259 * return the old setting.
1255 * 1260 *
1256 * If a process keyring is specified then this will be created if it doesn't 1261 * If a thread or process keyring is specified then it will be created if it
1257 * yet exist. The old setting will be returned if successful. 1262 * doesn't yet exist. The old setting will be returned if successful.
1258 */ 1263 */
1259long keyctl_set_reqkey_keyring(int reqkey_defl) 1264long keyctl_set_reqkey_keyring(int reqkey_defl)
1260{ 1265{
@@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
1279 1284
1280 case KEY_REQKEY_DEFL_PROCESS_KEYRING: 1285 case KEY_REQKEY_DEFL_PROCESS_KEYRING:
1281 ret = install_process_keyring_to_cred(new); 1286 ret = install_process_keyring_to_cred(new);
1282 if (ret < 0) { 1287 if (ret < 0)
1283 if (ret != -EEXIST) 1288 goto error;
1284 goto error;
1285 ret = 0;
1286 }
1287 goto set; 1289 goto set;
1288 1290
1289 case KEY_REQKEY_DEFL_DEFAULT: 1291 case KEY_REQKEY_DEFL_DEFAULT:
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index b6fdd22205b1..9139b18fc863 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -128,13 +128,18 @@ error:
128} 128}
129 129
130/* 130/*
131 * Install a fresh thread keyring directly to new credentials. This keyring is 131 * Install a thread keyring to the given credentials struct if it didn't have
132 * allowed to overrun the quota. 132 * one already. This is allowed to overrun the quota.
133 *
134 * Return: 0 if a thread keyring is now present; -errno on failure.
133 */ 135 */
134int install_thread_keyring_to_cred(struct cred *new) 136int install_thread_keyring_to_cred(struct cred *new)
135{ 137{
136 struct key *keyring; 138 struct key *keyring;
137 139
140 if (new->thread_keyring)
141 return 0;
142
138 keyring = keyring_alloc("_tid", new->uid, new->gid, new, 143 keyring = keyring_alloc("_tid", new->uid, new->gid, new,
139 KEY_POS_ALL | KEY_USR_VIEW, 144 KEY_POS_ALL | KEY_USR_VIEW,
140 KEY_ALLOC_QUOTA_OVERRUN, 145 KEY_ALLOC_QUOTA_OVERRUN,
@@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
147} 152}
148 153
149/* 154/*
150 * Install a fresh thread keyring, discarding the old one. 155 * Install a thread keyring to the current task if it didn't have one already.
156 *
157 * Return: 0 if a thread keyring is now present; -errno on failure.
151 */ 158 */
152static int install_thread_keyring(void) 159static int install_thread_keyring(void)
153{ 160{
@@ -158,8 +165,6 @@ static int install_thread_keyring(void)
158 if (!new) 165 if (!new)
159 return -ENOMEM; 166 return -ENOMEM;
160 167
161 BUG_ON(new->thread_keyring);
162
163 ret = install_thread_keyring_to_cred(new); 168 ret = install_thread_keyring_to_cred(new);
164 if (ret < 0) { 169 if (ret < 0) {
165 abort_creds(new); 170 abort_creds(new);
@@ -170,17 +175,17 @@ static int install_thread_keyring(void)
170} 175}
171 176
172/* 177/*
173 * Install a process keyring directly to a credentials struct. 178 * Install a process keyring to the given credentials struct if it didn't have
179 * one already. This is allowed to overrun the quota.
174 * 180 *
175 * Returns -EEXIST if there was already a process keyring, 0 if one installed, 181 * Return: 0 if a process keyring is now present; -errno on failure.
176 * and other value on any other error
177 */ 182 */
178int install_process_keyring_to_cred(struct cred *new) 183int install_process_keyring_to_cred(struct cred *new)
179{ 184{
180 struct key *keyring; 185 struct key *keyring;
181 186
182 if (new->process_keyring) 187 if (new->process_keyring)
183 return -EEXIST; 188 return 0;
184 189
185 keyring = keyring_alloc("_pid", new->uid, new->gid, new, 190 keyring = keyring_alloc("_pid", new->uid, new->gid, new,
186 KEY_POS_ALL | KEY_USR_VIEW, 191 KEY_POS_ALL | KEY_USR_VIEW,
@@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
194} 199}
195 200
196/* 201/*
197 * Make sure a process keyring is installed for the current process. The 202 * Install a process keyring to the current task if it didn't have one already.
198 * existing process keyring is not replaced.
199 * 203 *
200 * Returns 0 if there is a process keyring by the end of this function, some 204 * Return: 0 if a process keyring is now present; -errno on failure.
201 * error otherwise.
202 */ 205 */
203static int install_process_keyring(void) 206static int install_process_keyring(void)
204{ 207{
@@ -212,14 +215,18 @@ static int install_process_keyring(void)
212 ret = install_process_keyring_to_cred(new); 215 ret = install_process_keyring_to_cred(new);
213 if (ret < 0) { 216 if (ret < 0) {
214 abort_creds(new); 217 abort_creds(new);
215 return ret != -EEXIST ? ret : 0; 218 return ret;
216 } 219 }
217 220
218 return commit_creds(new); 221 return commit_creds(new);
219} 222}
220 223
221/* 224/*
222 * Install a session keyring directly to a credentials struct. 225 * Install the given keyring as the session keyring of the given credentials
226 * struct, replacing the existing one if any. If the given keyring is NULL,
227 * then install a new anonymous session keyring.
228 *
229 * Return: 0 on success; -errno on failure.
223 */ 230 */
224int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) 231int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
225{ 232{
@@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
254} 261}
255 262
256/* 263/*
257 * Install a session keyring, discarding the old one. If a keyring is not 264 * Install the given keyring as the session keyring of the current task,
258 * supplied, an empty one is invented. 265 * replacing the existing one if any. If the given keyring is NULL, then
266 * install a new anonymous session keyring.
267 *
268 * Return: 0 on success; -errno on failure.
259 */ 269 */
260static int install_session_keyring(struct key *keyring) 270static int install_session_keyring(struct key *keyring)
261{ 271{
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index d15b653de0bf..ee08c389b4d6 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -268,7 +268,7 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
268 unsigned int offset; 268 unsigned int offset;
269 unsigned int counter = 0; 269 unsigned int counter = 0;
270 270
271 offset = snd_hdac_chip_readl(bus, LLCH); 271 offset = snd_hdac_chip_readw(bus, LLCH);
272 272
273 /* Lets walk the linked capabilities list */ 273 /* Lets walk the linked capabilities list */
274 do { 274 do {
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 182d92efc7c8..c0abad2067e1 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -47,6 +47,7 @@ source "sound/soc/cirrus/Kconfig"
47source "sound/soc/davinci/Kconfig" 47source "sound/soc/davinci/Kconfig"
48source "sound/soc/dwc/Kconfig" 48source "sound/soc/dwc/Kconfig"
49source "sound/soc/fsl/Kconfig" 49source "sound/soc/fsl/Kconfig"
50source "sound/soc/hisilicon/Kconfig"
50source "sound/soc/jz4740/Kconfig" 51source "sound/soc/jz4740/Kconfig"
51source "sound/soc/nuc900/Kconfig" 52source "sound/soc/nuc900/Kconfig"
52source "sound/soc/omap/Kconfig" 53source "sound/soc/omap/Kconfig"
@@ -63,6 +64,7 @@ source "sound/soc/sh/Kconfig"
63source "sound/soc/sirf/Kconfig" 64source "sound/soc/sirf/Kconfig"
64source "sound/soc/spear/Kconfig" 65source "sound/soc/spear/Kconfig"
65source "sound/soc/sti/Kconfig" 66source "sound/soc/sti/Kconfig"
67source "sound/soc/stm/Kconfig"
66source "sound/soc/sunxi/Kconfig" 68source "sound/soc/sunxi/Kconfig"
67source "sound/soc/tegra/Kconfig" 69source "sound/soc/tegra/Kconfig"
68source "sound/soc/txx9/Kconfig" 70source "sound/soc/txx9/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 9a30f21d16ee..39c27a58158d 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SND_SOC) += cirrus/
27obj-$(CONFIG_SND_SOC) += davinci/ 27obj-$(CONFIG_SND_SOC) += davinci/
28obj-$(CONFIG_SND_SOC) += dwc/ 28obj-$(CONFIG_SND_SOC) += dwc/
29obj-$(CONFIG_SND_SOC) += fsl/ 29obj-$(CONFIG_SND_SOC) += fsl/
30obj-$(CONFIG_SND_SOC) += hisilicon/
30obj-$(CONFIG_SND_SOC) += jz4740/ 31obj-$(CONFIG_SND_SOC) += jz4740/
31obj-$(CONFIG_SND_SOC) += img/ 32obj-$(CONFIG_SND_SOC) += img/
32obj-$(CONFIG_SND_SOC) += intel/ 33obj-$(CONFIG_SND_SOC) += intel/
@@ -43,6 +44,7 @@ obj-$(CONFIG_SND_SOC) += sh/
43obj-$(CONFIG_SND_SOC) += sirf/ 44obj-$(CONFIG_SND_SOC) += sirf/
44obj-$(CONFIG_SND_SOC) += spear/ 45obj-$(CONFIG_SND_SOC) += spear/
45obj-$(CONFIG_SND_SOC) += sti/ 46obj-$(CONFIG_SND_SOC) += sti/
47obj-$(CONFIG_SND_SOC) += stm/
46obj-$(CONFIG_SND_SOC) += sunxi/ 48obj-$(CONFIG_SND_SOC) += sunxi/
47obj-$(CONFIG_SND_SOC) += tegra/ 49obj-$(CONFIG_SND_SOC) += tegra/
48obj-$(CONFIG_SND_SOC) += txx9/ 50obj-$(CONFIG_SND_SOC) += txx9/
diff --git a/sound/soc/blackfin/bfin-eval-adau1373.c b/sound/soc/blackfin/bfin-eval-adau1373.c
index 72ac78988426..64b88fdc1f6c 100644
--- a/sound/soc/blackfin/bfin-eval-adau1373.c
+++ b/sound/soc/blackfin/bfin-eval-adau1373.c
@@ -119,7 +119,7 @@ static int bfin_eval_adau1373_codec_init(struct snd_soc_pcm_runtime *rtd)
119 119
120 return ret; 120 return ret;
121} 121}
122static struct snd_soc_ops bfin_eval_adau1373_ops = { 122static const struct snd_soc_ops bfin_eval_adau1373_ops = {
123 .hw_params = bfin_eval_adau1373_hw_params, 123 .hw_params = bfin_eval_adau1373_hw_params,
124}; 124};
125 125
diff --git a/sound/soc/blackfin/bfin-eval-adav80x.c b/sound/soc/blackfin/bfin-eval-adav80x.c
index 1037477d10b2..99e5ecabdcda 100644
--- a/sound/soc/blackfin/bfin-eval-adav80x.c
+++ b/sound/soc/blackfin/bfin-eval-adav80x.c
@@ -64,7 +64,7 @@ static int bfin_eval_adav80x_codec_init(struct snd_soc_pcm_runtime *rtd)
64 return 0; 64 return 0;
65} 65}
66 66
67static struct snd_soc_ops bfin_eval_adav80x_ops = { 67static const struct snd_soc_ops bfin_eval_adav80x_ops = {
68 .hw_params = bfin_eval_adav80x_hw_params, 68 .hw_params = bfin_eval_adav80x_hw_params,
69}; 69};
70 70
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index e49e9da7f1f6..883ed4c8a551 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -49,6 +49,7 @@ config SND_SOC_ALL_CODECS
49 select SND_SOC_CS35L32 if I2C 49 select SND_SOC_CS35L32 if I2C
50 select SND_SOC_CS35L33 if I2C 50 select SND_SOC_CS35L33 if I2C
51 select SND_SOC_CS35L34 if I2C 51 select SND_SOC_CS35L34 if I2C
52 select SND_SOC_CS35L35 if I2C
52 select SND_SOC_CS42L42 if I2C 53 select SND_SOC_CS42L42 if I2C
53 select SND_SOC_CS42L51_I2C if I2C 54 select SND_SOC_CS42L51_I2C if I2C
54 select SND_SOC_CS42L52 if I2C && INPUT 55 select SND_SOC_CS42L52 if I2C && INPUT
@@ -69,9 +70,11 @@ config SND_SOC_ALL_CODECS
69 select SND_SOC_DA7219 if I2C 70 select SND_SOC_DA7219 if I2C
70 select SND_SOC_DA732X if I2C 71 select SND_SOC_DA732X if I2C
71 select SND_SOC_DA9055 if I2C 72 select SND_SOC_DA9055 if I2C
73 select SND_SOC_DIO2125
72 select SND_SOC_DMIC 74 select SND_SOC_DMIC
73 select SND_SOC_ES8328_SPI if SPI_MASTER 75 select SND_SOC_ES8328_SPI if SPI_MASTER
74 select SND_SOC_ES8328_I2C if I2C 76 select SND_SOC_ES8328_I2C if I2C
77 select SND_SOC_ES7134
75 select SND_SOC_GTM601 78 select SND_SOC_GTM601
76 select SND_SOC_HDAC_HDMI 79 select SND_SOC_HDAC_HDMI
77 select SND_SOC_ICS43432 80 select SND_SOC_ICS43432
@@ -89,6 +92,7 @@ config SND_SOC_ALL_CODECS
89 select SND_SOC_MAX9867 if I2C 92 select SND_SOC_MAX9867 if I2C
90 select SND_SOC_MAX98925 if I2C 93 select SND_SOC_MAX98925 if I2C
91 select SND_SOC_MAX98926 if I2C 94 select SND_SOC_MAX98926 if I2C
95 select SND_SOC_MAX98927 if I2C
92 select SND_SOC_MAX9850 if I2C 96 select SND_SOC_MAX9850 if I2C
93 select SND_SOC_MAX9860 if I2C 97 select SND_SOC_MAX9860 if I2C
94 select SND_SOC_MAX9768 if I2C 98 select SND_SOC_MAX9768 if I2C
@@ -97,6 +101,7 @@ config SND_SOC_ALL_CODECS
97 select SND_SOC_ML26124 if I2C 101 select SND_SOC_ML26124 if I2C
98 select SND_SOC_NAU8540 if I2C 102 select SND_SOC_NAU8540 if I2C
99 select SND_SOC_NAU8810 if I2C 103 select SND_SOC_NAU8810 if I2C
104 select SND_SOC_NAU8824 if I2C
100 select SND_SOC_NAU8825 if I2C 105 select SND_SOC_NAU8825 if I2C
101 select SND_SOC_HDMI_CODEC 106 select SND_SOC_HDMI_CODEC
102 select SND_SOC_PCM1681 if I2C 107 select SND_SOC_PCM1681 if I2C
@@ -303,12 +308,14 @@ config SND_SOC_ADAU1761
303 select SND_SOC_ADAU17X1 308 select SND_SOC_ADAU17X1
304 309
305config SND_SOC_ADAU1761_I2C 310config SND_SOC_ADAU1761_I2C
306 tristate 311 tristate "Analog Devices AU1761 CODEC - I2C"
312 depends on I2C
307 select SND_SOC_ADAU1761 313 select SND_SOC_ADAU1761
308 select REGMAP_I2C 314 select REGMAP_I2C
309 315
310config SND_SOC_ADAU1761_SPI 316config SND_SOC_ADAU1761_SPI
311 tristate 317 tristate "Analog Devices AU1761 CODEC - SPI"
318 depends on SPI
312 select SND_SOC_ADAU1761 319 select SND_SOC_ADAU1761
313 select REGMAP_SPI 320 select REGMAP_SPI
314 321
@@ -408,6 +415,10 @@ config SND_SOC_CS35L34
408 tristate "Cirrus Logic CS35L34 CODEC" 415 tristate "Cirrus Logic CS35L34 CODEC"
409 depends on I2C 416 depends on I2C
410 417
418config SND_SOC_CS35L35
419 tristate "Cirrus Logic CS35L35 CODEC"
420 depends on I2C
421
411config SND_SOC_CS42L42 422config SND_SOC_CS42L42
412 tristate "Cirrus Logic CS42L42 CODEC" 423 tristate "Cirrus Logic CS42L42 CODEC"
413 depends on I2C 424 depends on I2C
@@ -516,6 +527,10 @@ config SND_SOC_DA732X
516config SND_SOC_DA9055 527config SND_SOC_DA9055
517 tristate 528 tristate
518 529
530config SND_SOC_DIO2125
531 tristate "Dioo DIO2125 Amplifier"
532 select GPIOLIB
533
519config SND_SOC_DMIC 534config SND_SOC_DMIC
520 tristate 535 tristate
521 536
@@ -525,6 +540,9 @@ config SND_SOC_HDMI_CODEC
525 select SND_PCM_IEC958 540 select SND_PCM_IEC958
526 select HDMI 541 select HDMI
527 542
543config SND_SOC_ES7134
544 tristate "Everest Semi ES7134 CODEC"
545
528config SND_SOC_ES8328 546config SND_SOC_ES8328
529 tristate 547 tristate
530 548
@@ -588,6 +606,10 @@ config SND_SOC_MAX98925
588config SND_SOC_MAX98926 606config SND_SOC_MAX98926
589 tristate 607 tristate
590 608
609config SND_SOC_MAX98927
610 tristate "Maxim Integrated MAX98927 Speaker Amplifier"
611 depends on I2C
612
591config SND_SOC_MAX9850 613config SND_SOC_MAX9850
592 tristate 614 tristate
593 615
@@ -1116,6 +1138,10 @@ config SND_SOC_NAU8810
1116 tristate "Nuvoton Technology Corporation NAU88C10 CODEC" 1138 tristate "Nuvoton Technology Corporation NAU88C10 CODEC"
1117 depends on I2C 1139 depends on I2C
1118 1140
1141config SND_SOC_NAU8824
1142 tristate "Nuvoton Technology Corporation NAU88L24 CODEC"
1143 depends on I2C
1144
1119config SND_SOC_NAU8825 1145config SND_SOC_NAU8825
1120 tristate 1146 tristate
1121 1147
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 1796cb987e71..28a63fdaf982 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -39,6 +39,7 @@ snd-soc-cq93vc-objs := cq93vc.o
39snd-soc-cs35l32-objs := cs35l32.o 39snd-soc-cs35l32-objs := cs35l32.o
40snd-soc-cs35l33-objs := cs35l33.o 40snd-soc-cs35l33-objs := cs35l33.o
41snd-soc-cs35l34-objs := cs35l34.o 41snd-soc-cs35l34-objs := cs35l34.o
42snd-soc-cs35l35-objs := cs35l35.o
42snd-soc-cs42l42-objs := cs42l42.o 43snd-soc-cs42l42-objs := cs42l42.o
43snd-soc-cs42l51-objs := cs42l51.o 44snd-soc-cs42l51-objs := cs42l51.o
44snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o 45snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
@@ -63,6 +64,7 @@ snd-soc-da7219-objs := da7219.o da7219-aad.o
63snd-soc-da732x-objs := da732x.o 64snd-soc-da732x-objs := da732x.o
64snd-soc-da9055-objs := da9055.o 65snd-soc-da9055-objs := da9055.o
65snd-soc-dmic-objs := dmic.o 66snd-soc-dmic-objs := dmic.o
67snd-soc-es7134-objs := es7134.o
66snd-soc-es8328-objs := es8328.o 68snd-soc-es8328-objs := es8328.o
67snd-soc-es8328-i2c-objs := es8328-i2c.o 69snd-soc-es8328-i2c-objs := es8328-i2c.o
68snd-soc-es8328-spi-objs := es8328-spi.o 70snd-soc-es8328-spi-objs := es8328-spi.o
@@ -84,6 +86,7 @@ snd-soc-max98371-objs := max98371.o
84snd-soc-max9867-objs := max9867.o 86snd-soc-max9867-objs := max9867.o
85snd-soc-max98925-objs := max98925.o 87snd-soc-max98925-objs := max98925.o
86snd-soc-max98926-objs := max98926.o 88snd-soc-max98926-objs := max98926.o
89snd-soc-max98927-objs := max98927.o
87snd-soc-max9850-objs := max9850.o 90snd-soc-max9850-objs := max9850.o
88snd-soc-max9860-objs := max9860.o 91snd-soc-max9860-objs := max9860.o
89snd-soc-mc13783-objs := mc13783.o 92snd-soc-mc13783-objs := mc13783.o
@@ -92,6 +95,7 @@ snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o
92snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o 95snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
93snd-soc-nau8540-objs := nau8540.o 96snd-soc-nau8540-objs := nau8540.o
94snd-soc-nau8810-objs := nau8810.o 97snd-soc-nau8810-objs := nau8810.o
98snd-soc-nau8824-objs := nau8824.o
95snd-soc-nau8825-objs := nau8825.o 99snd-soc-nau8825-objs := nau8825.o
96snd-soc-hdmi-codec-objs := hdmi-codec.o 100snd-soc-hdmi-codec-objs := hdmi-codec.o
97snd-soc-pcm1681-objs := pcm1681.o 101snd-soc-pcm1681-objs := pcm1681.o
@@ -221,6 +225,7 @@ snd-soc-wm9712-objs := wm9712.o
221snd-soc-wm9713-objs := wm9713.o 225snd-soc-wm9713-objs := wm9713.o
222snd-soc-wm-hubs-objs := wm_hubs.o 226snd-soc-wm-hubs-objs := wm_hubs.o
223# Amp 227# Amp
228snd-soc-dio2125-objs := dio2125.o
224snd-soc-max9877-objs := max9877.o 229snd-soc-max9877-objs := max9877.o
225snd-soc-max98504-objs := max98504.o 230snd-soc-max98504-objs := max98504.o
226snd-soc-tpa6130a2-objs := tpa6130a2.o 231snd-soc-tpa6130a2-objs := tpa6130a2.o
@@ -269,6 +274,7 @@ obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
269obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o 274obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
270obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o 275obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o
271obj-$(CONFIG_SND_SOC_CS35L34) += snd-soc-cs35l34.o 276obj-$(CONFIG_SND_SOC_CS35L34) += snd-soc-cs35l34.o
277obj-$(CONFIG_SND_SOC_CS35L35) += snd-soc-cs35l35.o
272obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42.o 278obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42.o
273obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o 279obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
274obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o 280obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
@@ -293,6 +299,7 @@ obj-$(CONFIG_SND_SOC_DA7219) += snd-soc-da7219.o
293obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o 299obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
294obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o 300obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
295obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o 301obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
302obj-$(CONFIG_SND_SOC_ES7134) += snd-soc-es7134.o
296obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o 303obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
297obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o 304obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
298obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o 305obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
@@ -313,6 +320,7 @@ obj-$(CONFIG_SND_SOC_MAX98357A) += snd-soc-max98357a.o
313obj-$(CONFIG_SND_SOC_MAX9867) += snd-soc-max9867.o 320obj-$(CONFIG_SND_SOC_MAX9867) += snd-soc-max9867.o
314obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o 321obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
315obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o 322obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
323obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
316obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o 324obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
317obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o 325obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
318obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o 326obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
@@ -321,6 +329,7 @@ obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) +=snd-soc-msm8916-analog.o
321obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o 329obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o
322obj-$(CONFIG_SND_SOC_NAU8540) += snd-soc-nau8540.o 330obj-$(CONFIG_SND_SOC_NAU8540) += snd-soc-nau8540.o
323obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o 331obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
332obj-$(CONFIG_SND_SOC_NAU8824) += snd-soc-nau8824.o
324obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o 333obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
325obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o 334obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
326obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o 335obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
@@ -448,6 +457,7 @@ obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o
448obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o 457obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
449 458
450# Amp 459# Amp
460obj-$(CONFIG_SND_SOC_DIO2125) += snd-soc-dio2125.o
451obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o 461obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
452obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o 462obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o
453obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o 463obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index e819dd8c82fd..b2dfddead227 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -75,6 +75,12 @@
75#define DFS_DOUBLE_SPEED (1 << 2) 75#define DFS_DOUBLE_SPEED (1 << 2)
76#define DFS_QUAD_SPEED (2 << 2) 76#define DFS_QUAD_SPEED (2 << 2)
77 77
78/* ICTRL */
79#define ICTRL_MASK (0x3)
80
81/* OCTRL */
82#define OCTRL_MASK (0x3F)
83
78struct ak4613_formats { 84struct ak4613_formats {
79 unsigned int width; 85 unsigned int width;
80 unsigned int fmt; 86 unsigned int fmt;
@@ -365,8 +371,8 @@ static int ak4613_dai_hw_params(struct snd_pcm_substream *substream,
365 snd_soc_update_bits(codec, CTRL1, FMT_MASK, fmt_ctrl); 371 snd_soc_update_bits(codec, CTRL1, FMT_MASK, fmt_ctrl);
366 snd_soc_update_bits(codec, CTRL2, DFS_MASK, ctrl2); 372 snd_soc_update_bits(codec, CTRL2, DFS_MASK, ctrl2);
367 373
368 snd_soc_write(codec, ICTRL, priv->ic); 374 snd_soc_update_bits(codec, ICTRL, ICTRL_MASK, priv->ic);
369 snd_soc_write(codec, OCTRL, priv->oc); 375 snd_soc_update_bits(codec, OCTRL, OCTRL_MASK, priv->oc);
370 376
371hw_params_end: 377hw_params_end:
372 if (ret < 0) 378 if (ret < 0)
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
new file mode 100644
index 000000000000..f8aef5869b03
--- /dev/null
+++ b/sound/soc/codecs/cs35l35.c
@@ -0,0 +1,1580 @@
1/*
2 * cs35l35.c -- CS35L35 ALSA SoC audio driver
3 *
4 * Copyright 2017 Cirrus Logic, Inc.
5 *
6 * Author: Brian Austin <brian.austin@cirrus.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/version.h>
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/i2c.h>
21#include <linux/slab.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/consumer.h>
24#include <linux/gpio/consumer.h>
25#include <linux/of_device.h>
26#include <linux/of_gpio.h>
27#include <linux/regmap.h>
28#include <sound/core.h>
29#include <sound/pcm.h>
30#include <sound/pcm_params.h>
31#include <sound/soc.h>
32#include <sound/soc-dapm.h>
33#include <linux/gpio.h>
34#include <sound/initval.h>
35#include <sound/tlv.h>
36#include <sound/cs35l35.h>
37#include <linux/of_irq.h>
38#include <linux/completion.h>
39
40#include "cs35l35.h"
41
42/*
43 * Some fields take zero as a valid value so use a high bit flag that won't
44 * get written to the device to mark those.
45 */
46#define CS35L35_VALID_PDATA 0x80000000
47
48static const struct reg_default cs35l35_reg[] = {
49 {CS35L35_PWRCTL1, 0x01},
50 {CS35L35_PWRCTL2, 0x11},
51 {CS35L35_PWRCTL3, 0x00},
52 {CS35L35_CLK_CTL1, 0x04},
53 {CS35L35_CLK_CTL2, 0x12},
54 {CS35L35_CLK_CTL3, 0xCF},
55 {CS35L35_SP_FMT_CTL1, 0x20},
56 {CS35L35_SP_FMT_CTL2, 0x00},
57 {CS35L35_SP_FMT_CTL3, 0x02},
58 {CS35L35_MAG_COMP_CTL, 0x00},
59 {CS35L35_AMP_INP_DRV_CTL, 0x01},
60 {CS35L35_AMP_DIG_VOL_CTL, 0x12},
61 {CS35L35_AMP_DIG_VOL, 0x00},
62 {CS35L35_ADV_DIG_VOL, 0x00},
63 {CS35L35_PROTECT_CTL, 0x06},
64 {CS35L35_AMP_GAIN_AUD_CTL, 0x13},
65 {CS35L35_AMP_GAIN_PDM_CTL, 0x00},
66 {CS35L35_AMP_GAIN_ADV_CTL, 0x00},
67 {CS35L35_GPI_CTL, 0x00},
68 {CS35L35_BST_CVTR_V_CTL, 0x00},
69 {CS35L35_BST_PEAK_I, 0x07},
70 {CS35L35_BST_RAMP_CTL, 0x85},
71 {CS35L35_BST_CONV_COEF_1, 0x24},
72 {CS35L35_BST_CONV_COEF_2, 0x24},
73 {CS35L35_BST_CONV_SLOPE_COMP, 0x4E},
74 {CS35L35_BST_CONV_SW_FREQ, 0x04},
75 {CS35L35_CLASS_H_CTL, 0x0B},
76 {CS35L35_CLASS_H_HEADRM_CTL, 0x0B},
77 {CS35L35_CLASS_H_RELEASE_RATE, 0x08},
78 {CS35L35_CLASS_H_FET_DRIVE_CTL, 0x41},
79 {CS35L35_CLASS_H_VP_CTL, 0xC5},
80 {CS35L35_VPBR_CTL, 0x0A},
81 {CS35L35_VPBR_VOL_CTL, 0x90},
82 {CS35L35_VPBR_TIMING_CTL, 0x6A},
83 {CS35L35_VPBR_MODE_VOL_CTL, 0x00},
84 {CS35L35_SPKR_MON_CTL, 0xC0},
85 {CS35L35_IMON_SCALE_CTL, 0x30},
86 {CS35L35_AUDIN_RXLOC_CTL, 0x00},
87 {CS35L35_ADVIN_RXLOC_CTL, 0x80},
88 {CS35L35_VMON_TXLOC_CTL, 0x00},
89 {CS35L35_IMON_TXLOC_CTL, 0x80},
90 {CS35L35_VPMON_TXLOC_CTL, 0x04},
91 {CS35L35_VBSTMON_TXLOC_CTL, 0x84},
92 {CS35L35_VPBR_STATUS_TXLOC_CTL, 0x04},
93 {CS35L35_ZERO_FILL_LOC_CTL, 0x00},
94 {CS35L35_AUDIN_DEPTH_CTL, 0x0F},
95 {CS35L35_SPKMON_DEPTH_CTL, 0x0F},
96 {CS35L35_SUPMON_DEPTH_CTL, 0x0F},
97 {CS35L35_ZEROFILL_DEPTH_CTL, 0x00},
98 {CS35L35_MULT_DEV_SYNCH1, 0x02},
99 {CS35L35_MULT_DEV_SYNCH2, 0x80},
100 {CS35L35_PROT_RELEASE_CTL, 0x00},
101 {CS35L35_DIAG_MODE_REG_LOCK, 0x00},
102 {CS35L35_DIAG_MODE_CTL_1, 0x40},
103 {CS35L35_DIAG_MODE_CTL_2, 0x00},
104 {CS35L35_INT_MASK_1, 0xFF},
105 {CS35L35_INT_MASK_2, 0xFF},
106 {CS35L35_INT_MASK_3, 0xFF},
107 {CS35L35_INT_MASK_4, 0xFF},
108
109};
110
111static bool cs35l35_volatile_register(struct device *dev, unsigned int reg)
112{
113 switch (reg) {
114 case CS35L35_INT_STATUS_1:
115 case CS35L35_INT_STATUS_2:
116 case CS35L35_INT_STATUS_3:
117 case CS35L35_INT_STATUS_4:
118 case CS35L35_PLL_STATUS:
119 case CS35L35_OTP_TRIM_STATUS:
120 return true;
121 default:
122 return false;
123 }
124}
125
126static bool cs35l35_readable_register(struct device *dev, unsigned int reg)
127{
128 switch (reg) {
129 case CS35L35_DEVID_AB ... CS35L35_PWRCTL3:
130 case CS35L35_CLK_CTL1 ... CS35L35_SP_FMT_CTL3:
131 case CS35L35_MAG_COMP_CTL ... CS35L35_AMP_GAIN_AUD_CTL:
132 case CS35L35_AMP_GAIN_PDM_CTL ... CS35L35_BST_PEAK_I:
133 case CS35L35_BST_RAMP_CTL ... CS35L35_BST_CONV_SW_FREQ:
134 case CS35L35_CLASS_H_CTL ... CS35L35_CLASS_H_VP_CTL:
135 case CS35L35_CLASS_H_STATUS:
136 case CS35L35_VPBR_CTL ... CS35L35_VPBR_MODE_VOL_CTL:
137 case CS35L35_VPBR_ATTEN_STATUS:
138 case CS35L35_SPKR_MON_CTL:
139 case CS35L35_IMON_SCALE_CTL ... CS35L35_ZEROFILL_DEPTH_CTL:
140 case CS35L35_MULT_DEV_SYNCH1 ... CS35L35_PROT_RELEASE_CTL:
141 case CS35L35_DIAG_MODE_REG_LOCK ... CS35L35_DIAG_MODE_CTL_2:
142 case CS35L35_INT_MASK_1 ... CS35L35_PLL_STATUS:
143 case CS35L35_OTP_TRIM_STATUS:
144 return true;
145 default:
146 return false;
147 }
148}
149
150static bool cs35l35_precious_register(struct device *dev, unsigned int reg)
151{
152 switch (reg) {
153 case CS35L35_INT_STATUS_1:
154 case CS35L35_INT_STATUS_2:
155 case CS35L35_INT_STATUS_3:
156 case CS35L35_INT_STATUS_4:
157 case CS35L35_PLL_STATUS:
158 case CS35L35_OTP_TRIM_STATUS:
159 return true;
160 default:
161 return false;
162 }
163}
164
165static int cs35l35_wait_for_pdn(struct cs35l35_private *cs35l35)
166{
167 int ret;
168
169 if (cs35l35->pdata.ext_bst) {
170 usleep_range(5000, 5500);
171 return 0;
172 }
173
174 reinit_completion(&cs35l35->pdn_done);
175
176 ret = wait_for_completion_timeout(&cs35l35->pdn_done,
177 msecs_to_jiffies(100));
178 if (ret == 0) {
179 dev_err(cs35l35->dev, "PDN_DONE did not complete\n");
180 return -ETIMEDOUT;
181 }
182
183 return 0;
184}
185
/*
 * DAPM event handler for the SDIN (serial audio input) widget.
 *
 * PRE_PMU:  ungate MCLK, release the discharge filter, then power up all
 *           blocks (clear PDN_ALL) before audio starts flowing.
 * POST_PMD: the reverse sequence - discharge filter back on, PDN_ALL set,
 *           wait for the power-down to complete, then gate MCLK again.
 *
 * Returns 0 on success, a negative errno on timeout or invalid event.
 */
static int cs35l35_sdin_event(struct snd_soc_dapm_widget *w,
		struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
	int ret = 0;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* 0 = MCLK enabled */
		regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
					CS35L35_MCLK_DIS_MASK,
					0 << CS35L35_MCLK_DIS_SHIFT);
		regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
					CS35L35_DISCHG_FILT_MASK,
					0 << CS35L35_DISCHG_FILT_SHIFT);
		regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
					CS35L35_PDN_ALL_MASK, 0);
		break;
	case SND_SOC_DAPM_POST_PMD:
		regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
					CS35L35_DISCHG_FILT_MASK,
					1 << CS35L35_DISCHG_FILT_SHIFT);
		regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
					CS35L35_PDN_ALL_MASK, 1);

		/* Already muted, so disable volume ramp for faster shutdown */
		regmap_update_bits(cs35l35->regmap, CS35L35_AMP_DIG_VOL_CTL,
					CS35L35_AMP_DIGSFT_MASK, 0);

		/* blocks until PDN_DONE (or a fixed delay for ext boost) */
		ret = cs35l35_wait_for_pdn(cs35l35);

		regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
					CS35L35_MCLK_DIS_MASK,
					1 << CS35L35_MCLK_DIS_SHIFT);

		/* re-enable the soft ramp for the next power-up */
		regmap_update_bits(cs35l35->regmap, CS35L35_AMP_DIG_VOL_CTL,
					CS35L35_AMP_DIGSFT_MASK,
					1 << CS35L35_AMP_DIGSFT_SHIFT);
		break;
	default:
		dev_err(codec->dev, "Invalid event = 0x%x\n", event);
		ret = -EINVAL;
	}
	return ret;
}
231
232static int cs35l35_main_amp_event(struct snd_soc_dapm_widget *w,
233 struct snd_kcontrol *kcontrol, int event)
234{
235 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
236 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
237 unsigned int reg[4];
238 int i;
239
240 switch (event) {
241 case SND_SOC_DAPM_PRE_PMU:
242 if (cs35l35->pdata.bst_pdn_fet_on)
243 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
244 CS35L35_PDN_BST_MASK,
245 0 << CS35L35_PDN_BST_FETON_SHIFT);
246 else
247 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
248 CS35L35_PDN_BST_MASK,
249 0 << CS35L35_PDN_BST_FETOFF_SHIFT);
250 break;
251 case SND_SOC_DAPM_POST_PMU:
252 usleep_range(5000, 5100);
253 /* If in PDM mode we must use VP for Voltage control */
254 if (cs35l35->pdm_mode)
255 regmap_update_bits(cs35l35->regmap,
256 CS35L35_BST_CVTR_V_CTL,
257 CS35L35_BST_CTL_MASK,
258 0 << CS35L35_BST_CTL_SHIFT);
259
260 regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL,
261 CS35L35_AMP_MUTE_MASK, 0);
262
263 for (i = 0; i < 2; i++)
264 regmap_bulk_read(cs35l35->regmap, CS35L35_INT_STATUS_1,
265 &reg, ARRAY_SIZE(reg));
266
267 break;
268 case SND_SOC_DAPM_PRE_PMD:
269 regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL,
270 CS35L35_AMP_MUTE_MASK,
271 1 << CS35L35_AMP_MUTE_SHIFT);
272 if (cs35l35->pdata.bst_pdn_fet_on)
273 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
274 CS35L35_PDN_BST_MASK,
275 1 << CS35L35_PDN_BST_FETON_SHIFT);
276 else
277 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
278 CS35L35_PDN_BST_MASK,
279 1 << CS35L35_PDN_BST_FETOFF_SHIFT);
280 break;
281 case SND_SOC_DAPM_POST_PMD:
282 usleep_range(5000, 5100);
283 /*
284 * If PDM mode we should switch back to pdata value
285 * for Voltage control when we go down
286 */
287 if (cs35l35->pdm_mode)
288 regmap_update_bits(cs35l35->regmap,
289 CS35L35_BST_CVTR_V_CTL,
290 CS35L35_BST_CTL_MASK,
291 cs35l35->pdata.bst_vctl
292 << CS35L35_BST_CTL_SHIFT);
293
294 break;
295 default:
296 dev_err(codec->dev, "Invalid event = 0x%x\n", event);
297 }
298 return 0;
299}
300
/* dB mappings for the volume controls (TLV_DB_SCALE units are 0.01 dB) */
static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 50, 0);
303
/* Controls for the main audio path; always registered via the codec driver */
static const struct snd_kcontrol_new cs35l35_aud_controls[] = {
	SOC_SINGLE_SX_TLV("Digital Audio Volume", CS35L35_AMP_DIG_VOL,
		      0, 0x34, 0xE4, dig_vol_tlv),
	SOC_SINGLE_TLV("Analog Audio Volume", CS35L35_AMP_GAIN_AUD_CTL, 0, 19, 0,
			amp_gain_tlv),
	SOC_SINGLE_TLV("PDM Volume", CS35L35_AMP_GAIN_PDM_CTL, 0, 19, 0,
			amp_gain_tlv),
};
312
/* Advisory-path controls; only added when the stereo pdata option is set */
static const struct snd_kcontrol_new cs35l35_adv_controls[] = {
	SOC_SINGLE_SX_TLV("Digital Advisory Volume", CS35L35_ADV_DIG_VOL,
		      0, 0x34, 0xE4, dig_vol_tlv),
	SOC_SINGLE_TLV("Analog Advisory Volume", CS35L35_AMP_GAIN_ADV_CTL, 0, 19, 0,
			amp_gain_tlv),
};
319
/*
 * DAPM widgets: SDIN/SDOUT AIFs, the speaker output, the monitor ADC
 * inputs, and the main amplifier driver.  SDIN and Main AMP carry event
 * callbacks that sequence power up/down around the register writes.
 */
static const struct snd_soc_dapm_widget cs35l35_dapm_widgets[] = {
	SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L35_PWRCTL3, 1, 1,
				cs35l35_sdin_event, SND_SOC_DAPM_PRE_PMU |
				SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, CS35L35_PWRCTL3, 2, 1),

	SND_SOC_DAPM_OUTPUT("SPK"),

	SND_SOC_DAPM_INPUT("VP"),
	SND_SOC_DAPM_INPUT("VBST"),
	SND_SOC_DAPM_INPUT("ISENSE"),
	SND_SOC_DAPM_INPUT("VSENSE"),

	/* monitor ADCs (invert flag set: bits are power-down controls) */
	SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L35_PWRCTL2, 7, 1),
	SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L35_PWRCTL2, 6, 1),
	SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L35_PWRCTL3, 3, 1),
	SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L35_PWRCTL3, 4, 1),
	SND_SOC_DAPM_ADC("CLASS H", NULL, CS35L35_PWRCTL2, 5, 1),

	SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L35_PWRCTL2, 0, 1, NULL, 0,
			cs35l35_main_amp_event, SND_SOC_DAPM_PRE_PMU |
				SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU |
				SND_SOC_DAPM_PRE_PMD),
};
344
/*
 * DAPM routes: monitor inputs feed the ADCs which are multiplexed onto
 * SDOUT for capture; playback flows SDIN -> Class H -> Main AMP -> SPK.
 */
static const struct snd_soc_dapm_route cs35l35_audio_map[] = {
	{"VPMON ADC", NULL, "VP"},
	{"VBSTMON ADC", NULL, "VBST"},
	{"IMON ADC", NULL, "ISENSE"},
	{"VMON ADC", NULL, "VSENSE"},
	{"SDOUT", NULL, "IMON ADC"},
	{"SDOUT", NULL, "VMON ADC"},
	{"SDOUT", NULL, "VBSTMON ADC"},
	{"SDOUT", NULL, "VPMON ADC"},
	{"AMP Capture", NULL, "SDOUT"},

	{"SDIN", NULL, "AMP Playback"},
	{"CLASS H", NULL, "SDIN"},
	{"Main AMP", NULL, "CLASS H"},
	{"SPK", NULL, "Main AMP"},
};
361
362static int cs35l35_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
363{
364 struct snd_soc_codec *codec = codec_dai->codec;
365 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
366
367 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
368 case SND_SOC_DAIFMT_CBM_CFM:
369 regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
370 CS35L35_MS_MASK, 1 << CS35L35_MS_SHIFT);
371 cs35l35->slave_mode = false;
372 break;
373 case SND_SOC_DAIFMT_CBS_CFS:
374 regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
375 CS35L35_MS_MASK, 0 << CS35L35_MS_SHIFT);
376 cs35l35->slave_mode = true;
377 break;
378 default:
379 return -EINVAL;
380 }
381
382 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
383 case SND_SOC_DAIFMT_I2S:
384 cs35l35->i2s_mode = true;
385 cs35l35->pdm_mode = false;
386 break;
387 case SND_SOC_DAIFMT_PDM:
388 cs35l35->pdm_mode = true;
389 cs35l35->i2s_mode = false;
390 break;
391 default:
392 return -EINVAL;
393 }
394
395 return 0;
396}
397
/* One supported SYSCLK / sample-rate pairing and its serial-port config */
struct cs35l35_sysclk_config {
	int sysclk;
	int srate;
	unsigned char clk_cfg;
};

/* Exhaustive table of supported SYSCLK / sample-rate combinations */
static struct cs35l35_sysclk_config cs35l35_clk_ctl[] = {

	/* SYSCLK, Sample Rate, Serial Port Cfg */
	{5644800, 44100, 0x00},
	{5644800, 88200, 0x40},
	{6144000, 48000, 0x10},
	{6144000, 96000, 0x50},
	{11289600, 44100, 0x01},
	{11289600, 88200, 0x41},
	{11289600, 176400, 0x81},
	{12000000, 44100, 0x03},
	{12000000, 48000, 0x13},
	{12000000, 88200, 0x43},
	{12000000, 96000, 0x53},
	{12000000, 176400, 0x83},
	{12000000, 192000, 0x93},
	{12288000, 48000, 0x11},
	{12288000, 96000, 0x51},
	{12288000, 192000, 0x91},
	{13000000, 44100, 0x07},
	{13000000, 48000, 0x17},
	{13000000, 88200, 0x47},
	{13000000, 96000, 0x57},
	{13000000, 176400, 0x87},
	{13000000, 192000, 0x97},
	{22579200, 44100, 0x02},
	{22579200, 88200, 0x42},
	{22579200, 176400, 0x82},
	{24000000, 44100, 0x0B},
	{24000000, 48000, 0x1B},
	{24000000, 88200, 0x4B},
	{24000000, 96000, 0x5B},
	{24000000, 176400, 0x8B},
	{24000000, 192000, 0x9B},
	{24576000, 48000, 0x12},
	{24576000, 96000, 0x52},
	{24576000, 192000, 0x92},
	{26000000, 44100, 0x0F},
	{26000000, 48000, 0x1F},
	{26000000, 88200, 0x4F},
	{26000000, 96000, 0x5F},
	{26000000, 176400, 0x8F},
	{26000000, 192000, 0x9F},
};

/*
 * Look up the serial-port configuration byte for a SYSCLK / sample-rate
 * pair.  Returns the CLK_CTL2 value on a match, -EINVAL otherwise.
 */
static int cs35l35_get_clk_config(int sysclk, int srate)
{
	const size_t count = sizeof(cs35l35_clk_ctl) / sizeof(cs35l35_clk_ctl[0]);
	size_t i;

	for (i = 0; i < count; i++) {
		const struct cs35l35_sysclk_config *cfg = &cs35l35_clk_ctl[i];

		if (cfg->sysclk == sysclk && cfg->srate == srate)
			return cfg->clk_cfg;
	}

	return -EINVAL;
}
460
/*
 * hw_params: program the serial-port clock configuration and the input
 * word depth for the stream being started, apply the Rev A0 weak-FET
 * errata workaround, and (for I2S) validate and set the SCLK/fs ratio.
 *
 * Returns 0 on success or a negative errno on an unsupported rate,
 * width, ratio, or a failed register write.
 */
static int cs35l35_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params,
				 struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
	struct classh_cfg *classh = &cs35l35->pdata.classh_algo;
	int srate = params_rate(params);
	int ret = 0;
	u8 sp_sclks;
	int audin_format;
	int errata_chk;

	/* sysclk was cached by the set_sysclk callbacks */
	int clk_ctl = cs35l35_get_clk_config(cs35l35->sysclk, srate);

	if (clk_ctl < 0) {
		dev_err(codec->dev, "Invalid CLK:Rate %d:%d\n",
			cs35l35->sysclk, srate);
		return -EINVAL;
	}

	ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL2,
			  CS35L35_CLK_CTL2_MASK, clk_ctl);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set port config %d\n", ret);
		return ret;
	}

	/*
	 * Rev A0 Errata
	 * When configured for the weak-drive detection path (CH_WKFET_DIS = 0)
	 * the Class H algorithm does not enable weak-drive operation for
	 * nonzero values of CH_WKFET_DELAY if SP_RATE = 01 or 10
	 */
	errata_chk = clk_ctl & CS35L35_SP_RATE_MASK;

	/*
	 * NOTE(review): the comment above says SP_RATE 01 or 10 but the
	 * check below matches 0x01/0x03 - verify against the
	 * CS35L35_SP_RATE_MASK field encoding.
	 */
	if (classh->classh_wk_fet_disable == 0x00 &&
		(errata_chk == 0x01 || errata_chk == 0x03)) {
		/* workaround: force the weak-FET delay to zero */
		ret = regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_FET_DRIVE_CTL,
					CS35L35_CH_WKFET_DEL_MASK,
					0 << CS35L35_CH_WKFET_DEL_SHIFT);
		if (ret != 0) {
			dev_err(codec->dev, "Failed to set fet config %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * You can pull more Monitor data from the SDOUT pin than going to SDIN
	 * Just make sure your SCLK is fast enough to fill the frame
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		switch (params_width(params)) {
		case 8:
			audin_format = CS35L35_SDIN_DEPTH_8;
			break;
		case 16:
			audin_format = CS35L35_SDIN_DEPTH_16;
			break;
		case 24:
			audin_format = CS35L35_SDIN_DEPTH_24;
			break;
		default:
			dev_err(codec->dev, "Unsupported Width %d\n",
				params_width(params));
			return -EINVAL;
		}
		regmap_update_bits(cs35l35->regmap,
				CS35L35_AUDIN_DEPTH_CTL,
				CS35L35_AUDIN_DEPTH_MASK,
				audin_format <<
				CS35L35_AUDIN_DEPTH_SHIFT);
		/* in stereo mode the advisory input uses the same depth */
		if (cs35l35->pdata.stereo) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_AUDIN_DEPTH_CTL,
					CS35L35_ADVIN_DEPTH_MASK,
					audin_format <<
					CS35L35_ADVIN_DEPTH_SHIFT);
		}
	}

	if (cs35l35->i2s_mode) {
		/* We have to take the SCLK to derive num sclks
		 * to configure the CLOCK_CTL3 register correctly
		 */
		if ((cs35l35->sclk / srate) % 4) {
			dev_err(codec->dev, "Unsupported sclk/fs ratio %d:%d\n",
					cs35l35->sclk, srate);
			return -EINVAL;
		}
		/* register field encodes (ratio / 4) - 1 */
		sp_sclks = ((cs35l35->sclk / srate) / 4) - 1;

		/* Only certain ratios are supported in I2S Slave Mode */
		if (cs35l35->slave_mode) {
			switch (sp_sclks) {
			case CS35L35_SP_SCLKS_32FS:
			case CS35L35_SP_SCLKS_48FS:
			case CS35L35_SP_SCLKS_64FS:
				break;
			default:
				dev_err(codec->dev, "ratio not supported\n");
				return -EINVAL;
			}
		} else {
			/* Only certain ratios supported in I2S MASTER Mode */
			switch (sp_sclks) {
			case CS35L35_SP_SCLKS_32FS:
			case CS35L35_SP_SCLKS_64FS:
				break;
			default:
				dev_err(codec->dev, "ratio not supported\n");
				return -EINVAL;
			}
		}
		ret = regmap_update_bits(cs35l35->regmap,
					CS35L35_CLK_CTL3,
					CS35L35_SP_SCLKS_MASK, sp_sclks <<
					CS35L35_SP_SCLKS_SHIFT);
		if (ret != 0) {
			dev_err(codec->dev, "Failed to set fsclk %d\n", ret);
			return ret;
		}
	}

	return ret;
}
589
/* Sample rates supported on the PCM (I2S) DAI; enforced via constraint */
static const unsigned int cs35l35_src_rates[] = {
	44100, 48000, 88200, 96000, 176400, 192000
};

static const struct snd_pcm_hw_constraint_list cs35l35_constraints = {
	.count  = ARRAY_SIZE(cs35l35_src_rates),
	.list   = cs35l35_src_rates,
};
598
599static int cs35l35_pcm_startup(struct snd_pcm_substream *substream,
600 struct snd_soc_dai *dai)
601{
602 struct snd_soc_codec *codec = dai->codec;
603 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
604
605 if (!substream->runtime)
606 return 0;
607
608 snd_pcm_hw_constraint_list(substream->runtime, 0,
609 SNDRV_PCM_HW_PARAM_RATE, &cs35l35_constraints);
610
611 regmap_update_bits(cs35l35->regmap, CS35L35_AMP_INP_DRV_CTL,
612 CS35L35_PDM_MODE_MASK,
613 0 << CS35L35_PDM_MODE_SHIFT);
614
615 return 0;
616}
617
/* Sample rates supported on the PDM DAI; narrower than the PCM list */
static const unsigned int cs35l35_pdm_rates[] = {
	44100, 48000, 88200, 96000
};

static const struct snd_pcm_hw_constraint_list cs35l35_pdm_constraints = {
	.count  = ARRAY_SIZE(cs35l35_pdm_rates),
	.list   = cs35l35_pdm_rates,
};
626
627static int cs35l35_pdm_startup(struct snd_pcm_substream *substream,
628 struct snd_soc_dai *dai)
629{
630 struct snd_soc_codec *codec = dai->codec;
631 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
632
633 if (!substream->runtime)
634 return 0;
635
636 snd_pcm_hw_constraint_list(substream->runtime, 0,
637 SNDRV_PCM_HW_PARAM_RATE,
638 &cs35l35_pdm_constraints);
639
640 regmap_update_bits(cs35l35->regmap, CS35L35_AMP_INP_DRV_CTL,
641 CS35L35_PDM_MODE_MASK,
642 1 << CS35L35_PDM_MODE_SHIFT);
643
644 return 0;
645}
646
647static int cs35l35_dai_set_sysclk(struct snd_soc_dai *dai,
648 int clk_id, unsigned int freq, int dir)
649{
650 struct snd_soc_codec *codec = dai->codec;
651 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
652
653 /* Need the SCLK Frequency regardless of sysclk source for I2S */
654 cs35l35->sclk = freq;
655
656 return 0;
657}
658
/* Ops for the PCM (I2S) DAI */
static const struct snd_soc_dai_ops cs35l35_ops = {
	.startup = cs35l35_pcm_startup,
	.set_fmt = cs35l35_set_dai_fmt,
	.hw_params = cs35l35_hw_params,
	.set_sysclk = cs35l35_dai_set_sysclk,
};

/* Ops for the PDM DAI; no set_sysclk since SCLK ratios do not apply */
static const struct snd_soc_dai_ops cs35l35_pdm_ops = {
	.startup = cs35l35_pdm_startup,
	.set_fmt = cs35l35_set_dai_fmt,
	.hw_params = cs35l35_hw_params,
};
671
/*
 * Two DAIs: a full-duplex PCM (I2S) interface with monitor capture, and
 * a playback-only PDM interface.  Rates are SNDRV_PCM_RATE_KNOT and get
 * narrowed by the hw-constraint lists installed at startup.
 */
static struct snd_soc_dai_driver cs35l35_dai[] = {
	{
		.name = "cs35l35-pcm",
		.id = 0,
		.playback = {
			.stream_name = "AMP Playback",
			.channels_min = 1,
			.channels_max = 8,
			.rates = SNDRV_PCM_RATE_KNOT,
			.formats = CS35L35_FORMATS,
		},
		.capture = {
			.stream_name = "AMP Capture",
			.channels_min = 1,
			.channels_max = 8,
			.rates = SNDRV_PCM_RATE_KNOT,
			.formats = CS35L35_FORMATS,
		},
		.ops = &cs35l35_ops,
		.symmetric_rates = 1,
	},
	{
		.name = "cs35l35-pdm",
		.id = 1,
		.playback = {
			.stream_name = "PDM Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_KNOT,
			.formats = CS35L35_FORMATS,
		},
		.ops = &cs35l35_pdm_ops,
	},
};
706
707static int cs35l35_codec_set_sysclk(struct snd_soc_codec *codec,
708 int clk_id, int source, unsigned int freq,
709 int dir)
710{
711 struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
712 int clksrc;
713 int ret = 0;
714
715 switch (clk_id) {
716 case 0:
717 clksrc = CS35L35_CLK_SOURCE_MCLK;
718 break;
719 case 1:
720 clksrc = CS35L35_CLK_SOURCE_SCLK;
721 break;
722 case 2:
723 clksrc = CS35L35_CLK_SOURCE_PDM;
724 break;
725 default:
726 dev_err(codec->dev, "Invalid CLK Source\n");
727 return -EINVAL;
728 }
729
730 switch (freq) {
731 case 5644800:
732 case 6144000:
733 case 11289600:
734 case 12000000:
735 case 12288000:
736 case 13000000:
737 case 22579200:
738 case 24000000:
739 case 24576000:
740 case 26000000:
741 cs35l35->sysclk = freq;
742 break;
743 default:
744 dev_err(codec->dev, "Invalid CLK Frequency Input : %d\n", freq);
745 return -EINVAL;
746 }
747
748 ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
749 CS35L35_CLK_SOURCE_MASK,
750 clksrc << CS35L35_CLK_SOURCE_SHIFT);
751 if (ret != 0) {
752 dev_err(codec->dev, "Failed to set sysclk %d\n", ret);
753 return ret;
754 }
755
756 return ret;
757}
758
/*
 * Codec probe: apply all platform-data derived configuration to the
 * device - boost converter settings, channel routing, serial-port drive
 * strength, the Class H algorithm tuning block, and the monitor signal
 * (VMON/IMON/VPMON/VBSTMON/VPBR-status/zero-fill) TX slot layout.
 * Advisory-path controls are registered only in stereo configurations.
 *
 * Throughout, a zero pdata field means "not specified, keep the register
 * default"; nonzero values carry CS35L35_VALID_PDATA where the register
 * value 0 is itself meaningful.
 */
static int cs35l35_codec_probe(struct snd_soc_codec *codec)
{
	struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
	struct classh_cfg *classh = &cs35l35->pdata.classh_algo;
	struct monitor_cfg *monitor_config = &cs35l35->pdata.mon_cfg;
	int ret;

	/* Set Platform Data */
	/*
	 * NOTE(review): bst_vctl is written without a shift, unlike the
	 * other fields below - confirm CS35L35_BST_CTL_SHIFT is 0.
	 */
	if (cs35l35->pdata.bst_vctl)
		regmap_update_bits(cs35l35->regmap, CS35L35_BST_CVTR_V_CTL,
				CS35L35_BST_CTL_MASK,
				cs35l35->pdata.bst_vctl);

	if (cs35l35->pdata.bst_ipk)
		regmap_update_bits(cs35l35->regmap, CS35L35_BST_PEAK_I,
				CS35L35_BST_IPK_MASK,
				cs35l35->pdata.bst_ipk <<
				CS35L35_BST_IPK_SHIFT);

	if (cs35l35->pdata.gain_zc)
		regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL,
				CS35L35_AMP_GAIN_ZC_MASK,
				cs35l35->pdata.gain_zc <<
				CS35L35_AMP_GAIN_ZC_SHIFT);

	if (cs35l35->pdata.aud_channel)
		regmap_update_bits(cs35l35->regmap,
				CS35L35_AUDIN_RXLOC_CTL,
				CS35L35_AUD_IN_LR_MASK,
				cs35l35->pdata.aud_channel <<
				CS35L35_AUD_IN_LR_SHIFT);

	/* stereo: route the advisory channel and add its volume controls */
	if (cs35l35->pdata.stereo) {
		regmap_update_bits(cs35l35->regmap,
				CS35L35_ADVIN_RXLOC_CTL,
				CS35L35_ADV_IN_LR_MASK,
				cs35l35->pdata.adv_channel <<
				CS35L35_ADV_IN_LR_SHIFT);
		if (cs35l35->pdata.shared_bst)
			regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_CTL,
					CS35L35_CH_STEREO_MASK,
					1 << CS35L35_CH_STEREO_SHIFT);
		ret = snd_soc_add_codec_controls(codec, cs35l35_adv_controls,
					ARRAY_SIZE(cs35l35_adv_controls));
		if (ret)
			return ret;
	}

	if (cs35l35->pdata.sp_drv_str)
		regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
				CS35L35_SP_DRV_MASK,
				cs35l35->pdata.sp_drv_str <<
				CS35L35_SP_DRV_SHIFT);
	if (cs35l35->pdata.sp_drv_unused)
		regmap_update_bits(cs35l35->regmap, CS35L35_SP_FMT_CTL3,
				   CS35L35_SP_I2S_DRV_MASK,
				   cs35l35->pdata.sp_drv_unused <<
				   CS35L35_SP_I2S_DRV_SHIFT);

	/* Class H algorithm tuning, each field optional */
	if (classh->classh_algo_enable) {
		if (classh->classh_bst_override)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_CTL,
					CS35L35_CH_BST_OVR_MASK,
					classh->classh_bst_override <<
					CS35L35_CH_BST_OVR_SHIFT);
		if (classh->classh_bst_max_limit)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_CTL,
					CS35L35_CH_BST_LIM_MASK,
					classh->classh_bst_max_limit <<
					CS35L35_CH_BST_LIM_SHIFT);
		if (classh->classh_mem_depth)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_CTL,
					CS35L35_CH_MEM_DEPTH_MASK,
					classh->classh_mem_depth <<
					CS35L35_CH_MEM_DEPTH_SHIFT);
		if (classh->classh_headroom)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_HEADRM_CTL,
					CS35L35_CH_HDRM_CTL_MASK,
					classh->classh_headroom <<
					CS35L35_CH_HDRM_CTL_SHIFT);
		if (classh->classh_release_rate)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_RELEASE_RATE,
					CS35L35_CH_REL_RATE_MASK,
					classh->classh_release_rate <<
					CS35L35_CH_REL_RATE_SHIFT);
		if (classh->classh_wk_fet_disable)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_FET_DRIVE_CTL,
					CS35L35_CH_WKFET_DIS_MASK,
					classh->classh_wk_fet_disable <<
					CS35L35_CH_WKFET_DIS_SHIFT);
		if (classh->classh_wk_fet_delay)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_FET_DRIVE_CTL,
					CS35L35_CH_WKFET_DEL_MASK,
					classh->classh_wk_fet_delay <<
					CS35L35_CH_WKFET_DEL_SHIFT);
		if (classh->classh_wk_fet_thld)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_FET_DRIVE_CTL,
					CS35L35_CH_WKFET_THLD_MASK,
					classh->classh_wk_fet_thld <<
					CS35L35_CH_WKFET_THLD_SHIFT);
		if (classh->classh_vpch_auto)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_VP_CTL,
					CS35L35_CH_VP_AUTO_MASK,
					classh->classh_vpch_auto <<
					CS35L35_CH_VP_AUTO_SHIFT);
		if (classh->classh_vpch_rate)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_VP_CTL,
					CS35L35_CH_VP_RATE_MASK,
					classh->classh_vpch_rate <<
					CS35L35_CH_VP_RATE_SHIFT);
		if (classh->classh_vpch_man)
			regmap_update_bits(cs35l35->regmap,
					CS35L35_CLASS_H_VP_CTL,
					CS35L35_CH_VP_MAN_MASK,
					classh->classh_vpch_man <<
					CS35L35_CH_VP_MAN_SHIFT);
	}

	/* Monitor signal depths and TX slot locations for capture */
	if (monitor_config->is_present) {
		if (monitor_config->vmon_specs) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SPKMON_DEPTH_CTL,
					CS35L35_VMON_DEPTH_MASK,
					monitor_config->vmon_dpth <<
					CS35L35_VMON_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VMON_TXLOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->vmon_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VMON_TXLOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->vmon_frm <<
					CS35L35_MON_FRM_SHIFT);
		}
		if (monitor_config->imon_specs) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SPKMON_DEPTH_CTL,
					CS35L35_IMON_DEPTH_MASK,
					monitor_config->imon_dpth <<
					CS35L35_IMON_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_IMON_TXLOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->imon_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_IMON_TXLOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->imon_frm <<
					CS35L35_MON_FRM_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_IMON_SCALE_CTL,
					CS35L35_IMON_SCALE_MASK,
					monitor_config->imon_scale <<
					CS35L35_IMON_SCALE_SHIFT);
		}
		if (monitor_config->vpmon_specs) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SUPMON_DEPTH_CTL,
					CS35L35_VPMON_DEPTH_MASK,
					monitor_config->vpmon_dpth <<
					CS35L35_VPMON_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VPMON_TXLOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->vpmon_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VPMON_TXLOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->vpmon_frm <<
					CS35L35_MON_FRM_SHIFT);
		}
		if (monitor_config->vbstmon_specs) {
			/*
			 * NOTE(review): this writes vpmon_dpth into the
			 * VBSTMON depth field; a vbstmon depth value looks
			 * intended (copy-paste from the vpmon block above?) -
			 * confirm against struct monitor_cfg.
			 */
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SUPMON_DEPTH_CTL,
					CS35L35_VBSTMON_DEPTH_MASK,
					monitor_config->vpmon_dpth <<
					CS35L35_VBSTMON_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VBSTMON_TXLOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->vbstmon_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VBSTMON_TXLOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->vbstmon_frm <<
					CS35L35_MON_FRM_SHIFT);
		}
		if (monitor_config->vpbrstat_specs) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SUPMON_DEPTH_CTL,
					CS35L35_VPBRSTAT_DEPTH_MASK,
					monitor_config->vpbrstat_dpth <<
					CS35L35_VPBRSTAT_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VPBR_STATUS_TXLOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->vpbrstat_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_VPBR_STATUS_TXLOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->vpbrstat_frm <<
					CS35L35_MON_FRM_SHIFT);
		}
		if (monitor_config->zerofill_specs) {
			regmap_update_bits(cs35l35->regmap,
					CS35L35_SUPMON_DEPTH_CTL,
					CS35L35_ZEROFILL_DEPTH_MASK,
					monitor_config->zerofill_dpth <<
					CS35L35_ZEROFILL_DEPTH_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_ZERO_FILL_LOC_CTL,
					CS35L35_MON_TXLOC_MASK,
					monitor_config->zerofill_loc <<
					CS35L35_MON_TXLOC_SHIFT);
			regmap_update_bits(cs35l35->regmap,
					CS35L35_ZERO_FILL_LOC_CTL,
					CS35L35_MON_FRM_MASK,
					monitor_config->zerofill_frm <<
					CS35L35_MON_FRM_SHIFT);
		}
	}

	return 0;
}
999
/* Codec driver: probe applies pdata; component holds controls/DAPM */
static struct snd_soc_codec_driver soc_codec_dev_cs35l35 = {
	.probe = cs35l35_codec_probe,
	.set_sysclk = cs35l35_codec_set_sysclk,
	.component_driver = {
		.dapm_widgets = cs35l35_dapm_widgets,
		.num_dapm_widgets = ARRAY_SIZE(cs35l35_dapm_widgets),

		.dapm_routes = cs35l35_audio_map,
		.num_dapm_routes = ARRAY_SIZE(cs35l35_audio_map),

		.controls = cs35l35_aud_controls,
		.num_controls = ARRAY_SIZE(cs35l35_aud_controls),
	},

};
1015
/*
 * regmap for the 8-bit I2C register map, with an rbtree cache seeded
 * from the cs35l35_reg defaults table.
 * NOTE(review): looks like this could be declared const if nothing in
 * the (not visible here) probe path modifies it - verify before changing.
 */
static struct regmap_config cs35l35_regmap = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = CS35L35_MAX_REGISTER,
	.reg_defaults = cs35l35_reg,
	.num_reg_defaults = ARRAY_SIZE(cs35l35_reg),
	.volatile_reg = cs35l35_volatile_register,
	.readable_reg = cs35l35_readable_register,
	.precious_reg = cs35l35_precious_register,
	.cache_type = REGCACHE_RBTREE,
};
1028
1029static irqreturn_t cs35l35_irq(int irq, void *data)
1030{
1031 struct cs35l35_private *cs35l35 = data;
1032 unsigned int sticky1, sticky2, sticky3, sticky4;
1033 unsigned int mask1, mask2, mask3, mask4, current1;
1034
1035 /* ack the irq by reading all status registers */
1036 regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_4, &sticky4);
1037 regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_3, &sticky3);
1038 regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_2, &sticky2);
1039 regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_1, &sticky1);
1040
1041 regmap_read(cs35l35->regmap, CS35L35_INT_MASK_4, &mask4);
1042 regmap_read(cs35l35->regmap, CS35L35_INT_MASK_3, &mask3);
1043 regmap_read(cs35l35->regmap, CS35L35_INT_MASK_2, &mask2);
1044 regmap_read(cs35l35->regmap, CS35L35_INT_MASK_1, &mask1);
1045
1046 /* Check to see if unmasked bits are active */
1047 if (!(sticky1 & ~mask1) && !(sticky2 & ~mask2) && !(sticky3 & ~mask3)
1048 && !(sticky4 & ~mask4))
1049 return IRQ_NONE;
1050
1051 if (sticky2 & CS35L35_PDN_DONE)
1052 complete(&cs35l35->pdn_done);
1053
1054 /* read the current values */
1055 regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_1, &current1);
1056
1057 /* handle the interrupts */
1058 if (sticky1 & CS35L35_CAL_ERR) {
1059 dev_crit(cs35l35->dev, "Calibration Error\n");
1060
1061 /* error is no longer asserted; safe to reset */
1062 if (!(current1 & CS35L35_CAL_ERR)) {
1063 pr_debug("%s : Cal error release\n", __func__);
1064 regmap_update_bits(cs35l35->regmap,
1065 CS35L35_PROT_RELEASE_CTL,
1066 CS35L35_CAL_ERR_RLS, 0);
1067 regmap_update_bits(cs35l35->regmap,
1068 CS35L35_PROT_RELEASE_CTL,
1069 CS35L35_CAL_ERR_RLS,
1070 CS35L35_CAL_ERR_RLS);
1071 regmap_update_bits(cs35l35->regmap,
1072 CS35L35_PROT_RELEASE_CTL,
1073 CS35L35_CAL_ERR_RLS, 0);
1074 }
1075 }
1076
1077 if (sticky1 & CS35L35_AMP_SHORT) {
1078 dev_crit(cs35l35->dev, "AMP Short Error\n");
1079 /* error is no longer asserted; safe to reset */
1080 if (!(current1 & CS35L35_AMP_SHORT)) {
1081 dev_dbg(cs35l35->dev, "Amp short error release\n");
1082 regmap_update_bits(cs35l35->regmap,
1083 CS35L35_PROT_RELEASE_CTL,
1084 CS35L35_SHORT_RLS, 0);
1085 regmap_update_bits(cs35l35->regmap,
1086 CS35L35_PROT_RELEASE_CTL,
1087 CS35L35_SHORT_RLS,
1088 CS35L35_SHORT_RLS);
1089 regmap_update_bits(cs35l35->regmap,
1090 CS35L35_PROT_RELEASE_CTL,
1091 CS35L35_SHORT_RLS, 0);
1092 }
1093 }
1094
1095 if (sticky1 & CS35L35_OTW) {
1096 dev_warn(cs35l35->dev, "Over temperature warning\n");
1097
1098 /* error is no longer asserted; safe to reset */
1099 if (!(current1 & CS35L35_OTW)) {
1100 dev_dbg(cs35l35->dev, "Over temperature warn release\n");
1101 regmap_update_bits(cs35l35->regmap,
1102 CS35L35_PROT_RELEASE_CTL,
1103 CS35L35_OTW_RLS, 0);
1104 regmap_update_bits(cs35l35->regmap,
1105 CS35L35_PROT_RELEASE_CTL,
1106 CS35L35_OTW_RLS,
1107 CS35L35_OTW_RLS);
1108 regmap_update_bits(cs35l35->regmap,
1109 CS35L35_PROT_RELEASE_CTL,
1110 CS35L35_OTW_RLS, 0);
1111 }
1112 }
1113
1114 if (sticky1 & CS35L35_OTE) {
1115 dev_crit(cs35l35->dev, "Over temperature error\n");
1116 /* error is no longer asserted; safe to reset */
1117 if (!(current1 & CS35L35_OTE)) {
1118 dev_dbg(cs35l35->dev, "Over temperature error release\n");
1119 regmap_update_bits(cs35l35->regmap,
1120 CS35L35_PROT_RELEASE_CTL,
1121 CS35L35_OTE_RLS, 0);
1122 regmap_update_bits(cs35l35->regmap,
1123 CS35L35_PROT_RELEASE_CTL,
1124 CS35L35_OTE_RLS,
1125 CS35L35_OTE_RLS);
1126 regmap_update_bits(cs35l35->regmap,
1127 CS35L35_PROT_RELEASE_CTL,
1128 CS35L35_OTE_RLS, 0);
1129 }
1130 }
1131
1132 if (sticky3 & CS35L35_BST_HIGH) {
1133 dev_crit(cs35l35->dev, "VBST error: powering off!\n");
1134 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
1135 CS35L35_PDN_AMP, CS35L35_PDN_AMP);
1136 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
1137 CS35L35_PDN_ALL, CS35L35_PDN_ALL);
1138 }
1139
1140 if (sticky3 & CS35L35_LBST_SHORT) {
1141 dev_crit(cs35l35->dev, "LBST error: powering off!\n");
1142 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
1143 CS35L35_PDN_AMP, CS35L35_PDN_AMP);
1144 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1,
1145 CS35L35_PDN_ALL, CS35L35_PDN_ALL);
1146 }
1147
1148 if (sticky2 & CS35L35_VPBR_ERR)
1149 dev_dbg(cs35l35->dev, "Error: Reactive Brownout\n");
1150
1151 if (sticky4 & CS35L35_VMON_OVFL)
1152 dev_dbg(cs35l35->dev, "Error: VMON overflow\n");
1153
1154 if (sticky4 & CS35L35_IMON_OVFL)
1155 dev_dbg(cs35l35->dev, "Error: IMON overflow\n");
1156
1157 return IRQ_HANDLED;
1158}
1159
1160
1161static int cs35l35_handle_of_data(struct i2c_client *i2c_client,
1162 struct cs35l35_platform_data *pdata)
1163{
1164 struct device_node *np = i2c_client->dev.of_node;
1165 struct device_node *classh, *signal_format;
1166 struct classh_cfg *classh_config = &pdata->classh_algo;
1167 struct monitor_cfg *monitor_config = &pdata->mon_cfg;
1168 unsigned int val32 = 0;
1169 u8 monitor_array[4];
1170 const int imon_array_size = ARRAY_SIZE(monitor_array);
1171 const int mon_array_size = imon_array_size - 1;
1172 int ret = 0;
1173
1174 if (!np)
1175 return 0;
1176
1177 pdata->bst_pdn_fet_on = of_property_read_bool(np,
1178 "cirrus,boost-pdn-fet-on");
1179
1180 ret = of_property_read_u32(np, "cirrus,boost-ctl-millivolt", &val32);
1181 if (ret >= 0) {
1182 if (val32 < 2600 || val32 > 9000) {
1183 dev_err(&i2c_client->dev,
1184 "Invalid Boost Voltage %d mV\n", val32);
1185 return -EINVAL;
1186 }
1187 pdata->bst_vctl = ((val32 - 2600) / 100) + 1;
1188 }
1189
1190 ret = of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val32);
1191 if (ret >= 0) {
1192 if (val32 < 1680 || val32 > 4480) {
1193 dev_err(&i2c_client->dev,
1194 "Invalid Boost Peak Current %u mA\n", val32);
1195 return -EINVAL;
1196 }
1197
1198 pdata->bst_ipk = (val32 - 1680) / 110;
1199 }
1200
1201 if (of_property_read_u32(np, "cirrus,sp-drv-strength", &val32) >= 0)
1202 pdata->sp_drv_str = val32;
1203 if (of_property_read_u32(np, "cirrus,sp-drv-unused", &val32) >= 0)
1204 pdata->sp_drv_unused = val32 | CS35L35_VALID_PDATA;
1205
1206 pdata->stereo = of_property_read_bool(np, "cirrus,stereo-config");
1207
1208 if (pdata->stereo) {
1209 ret = of_property_read_u32(np, "cirrus,audio-channel", &val32);
1210 if (ret >= 0)
1211 pdata->aud_channel = val32;
1212
1213 ret = of_property_read_u32(np, "cirrus,advisory-channel",
1214 &val32);
1215 if (ret >= 0)
1216 pdata->adv_channel = val32;
1217
1218 pdata->shared_bst = of_property_read_bool(np,
1219 "cirrus,shared-boost");
1220 }
1221
1222 pdata->ext_bst = of_property_read_bool(np, "cirrus,external-boost");
1223
1224 pdata->gain_zc = of_property_read_bool(np, "cirrus,amp-gain-zc");
1225
1226 classh = of_get_child_by_name(np, "cirrus,classh-internal-algo");
1227 classh_config->classh_algo_enable = classh ? true : false;
1228
1229 if (classh_config->classh_algo_enable) {
1230 classh_config->classh_bst_override =
1231 of_property_read_bool(np, "cirrus,classh-bst-overide");
1232
1233 ret = of_property_read_u32(classh,
1234 "cirrus,classh-bst-max-limit",
1235 &val32);
1236 if (ret >= 0) {
1237 val32 |= CS35L35_VALID_PDATA;
1238 classh_config->classh_bst_max_limit = val32;
1239 }
1240
1241 ret = of_property_read_u32(classh,
1242 "cirrus,classh-bst-max-limit",
1243 &val32);
1244 if (ret >= 0) {
1245 val32 |= CS35L35_VALID_PDATA;
1246 classh_config->classh_bst_max_limit = val32;
1247 }
1248
1249 ret = of_property_read_u32(classh, "cirrus,classh-mem-depth",
1250 &val32);
1251 if (ret >= 0) {
1252 val32 |= CS35L35_VALID_PDATA;
1253 classh_config->classh_mem_depth = val32;
1254 }
1255
1256 ret = of_property_read_u32(classh, "cirrus,classh-release-rate",
1257 &val32);
1258 if (ret >= 0)
1259 classh_config->classh_release_rate = val32;
1260
1261 ret = of_property_read_u32(classh, "cirrus,classh-headroom",
1262 &val32);
1263 if (ret >= 0) {
1264 val32 |= CS35L35_VALID_PDATA;
1265 classh_config->classh_headroom = val32;
1266 }
1267
1268 ret = of_property_read_u32(classh,
1269 "cirrus,classh-wk-fet-disable",
1270 &val32);
1271 if (ret >= 0)
1272 classh_config->classh_wk_fet_disable = val32;
1273
1274 ret = of_property_read_u32(classh, "cirrus,classh-wk-fet-delay",
1275 &val32);
1276 if (ret >= 0) {
1277 val32 |= CS35L35_VALID_PDATA;
1278 classh_config->classh_wk_fet_delay = val32;
1279 }
1280
1281 ret = of_property_read_u32(classh, "cirrus,classh-wk-fet-thld",
1282 &val32);
1283 if (ret >= 0)
1284 classh_config->classh_wk_fet_thld = val32;
1285
1286 ret = of_property_read_u32(classh, "cirrus,classh-vpch-auto",
1287 &val32);
1288 if (ret >= 0) {
1289 val32 |= CS35L35_VALID_PDATA;
1290 classh_config->classh_vpch_auto = val32;
1291 }
1292
1293 ret = of_property_read_u32(classh, "cirrus,classh-vpch-rate",
1294 &val32);
1295 if (ret >= 0) {
1296 val32 |= CS35L35_VALID_PDATA;
1297 classh_config->classh_vpch_rate = val32;
1298 }
1299
1300 ret = of_property_read_u32(classh, "cirrus,classh-vpch-man",
1301 &val32);
1302 if (ret >= 0)
1303 classh_config->classh_vpch_man = val32;
1304 }
1305 of_node_put(classh);
1306
1307 /* frame depth location */
1308 signal_format = of_get_child_by_name(np, "cirrus,monitor-signal-format");
1309 monitor_config->is_present = signal_format ? true : false;
1310 if (monitor_config->is_present) {
1311 ret = of_property_read_u8_array(signal_format, "cirrus,imon",
1312 monitor_array, imon_array_size);
1313 if (!ret) {
1314 monitor_config->imon_specs = true;
1315 monitor_config->imon_dpth = monitor_array[0];
1316 monitor_config->imon_loc = monitor_array[1];
1317 monitor_config->imon_frm = monitor_array[2];
1318 monitor_config->imon_scale = monitor_array[3];
1319 }
1320 ret = of_property_read_u8_array(signal_format, "cirrus,vmon",
1321 monitor_array, mon_array_size);
1322 if (!ret) {
1323 monitor_config->vmon_specs = true;
1324 monitor_config->vmon_dpth = monitor_array[0];
1325 monitor_config->vmon_loc = monitor_array[1];
1326 monitor_config->vmon_frm = monitor_array[2];
1327 }
1328 ret = of_property_read_u8_array(signal_format, "cirrus,vpmon",
1329 monitor_array, mon_array_size);
1330 if (!ret) {
1331 monitor_config->vpmon_specs = true;
1332 monitor_config->vpmon_dpth = monitor_array[0];
1333 monitor_config->vpmon_loc = monitor_array[1];
1334 monitor_config->vpmon_frm = monitor_array[2];
1335 }
1336 ret = of_property_read_u8_array(signal_format, "cirrus,vbstmon",
1337 monitor_array, mon_array_size);
1338 if (!ret) {
1339 monitor_config->vbstmon_specs = true;
1340 monitor_config->vbstmon_dpth = monitor_array[0];
1341 monitor_config->vbstmon_loc = monitor_array[1];
1342 monitor_config->vbstmon_frm = monitor_array[2];
1343 }
1344 ret = of_property_read_u8_array(signal_format, "cirrus,vpbrstat",
1345 monitor_array, mon_array_size);
1346 if (!ret) {
1347 monitor_config->vpbrstat_specs = true;
1348 monitor_config->vpbrstat_dpth = monitor_array[0];
1349 monitor_config->vpbrstat_loc = monitor_array[1];
1350 monitor_config->vpbrstat_frm = monitor_array[2];
1351 }
1352 ret = of_property_read_u8_array(signal_format, "cirrus,zerofill",
1353 monitor_array, mon_array_size);
1354 if (!ret) {
1355 monitor_config->zerofill_specs = true;
1356 monitor_config->zerofill_dpth = monitor_array[0];
1357 monitor_config->zerofill_loc = monitor_array[1];
1358 monitor_config->zerofill_frm = monitor_array[2];
1359 }
1360 }
1361 of_node_put(signal_format);
1362
1363 return 0;
1364}
1365
/*
 * Errata Rev A0
 *
 * Raw { reg, value } pairs applied once at probe via
 * regmap_register_patch().  NOTE(review): several addresses written here
 * (0x00, 0x6D, 0x7F) are not in the documented register map - presumably
 * vendor test-page key/unlock writes; values are intentionally left as
 * supplied and must not be changed.
 */
static const struct reg_sequence cs35l35_errata_patch[] = {

	{ 0x7F, 0x99 },
	{ 0x00, 0x99 },
	{ 0x52, 0x22 },
	{ 0x04, 0x14 },
	{ 0x6D, 0x44 },
	{ 0x24, 0x10 },
	{ 0x58, 0xC4 },
	{ 0x00, 0x98 },
	{ 0x18, 0x08 },
	{ 0x00, 0x00 },
	{ 0x7F, 0x00 },
};
1381
1382static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
1383 const struct i2c_device_id *id)
1384{
1385 struct cs35l35_private *cs35l35;
1386 struct device *dev = &i2c_client->dev;
1387 struct cs35l35_platform_data *pdata = dev_get_platdata(dev);
1388 int i;
1389 int ret;
1390 unsigned int devid = 0;
1391 unsigned int reg;
1392
1393 cs35l35 = devm_kzalloc(dev, sizeof(struct cs35l35_private), GFP_KERNEL);
1394 if (!cs35l35)
1395 return -ENOMEM;
1396
1397 cs35l35->dev = dev;
1398
1399 i2c_set_clientdata(i2c_client, cs35l35);
1400 cs35l35->regmap = devm_regmap_init_i2c(i2c_client, &cs35l35_regmap);
1401 if (IS_ERR(cs35l35->regmap)) {
1402 ret = PTR_ERR(cs35l35->regmap);
1403 dev_err(dev, "regmap_init() failed: %d\n", ret);
1404 goto err;
1405 }
1406
1407 for (i = 0; i < ARRAY_SIZE(cs35l35_supplies); i++)
1408 cs35l35->supplies[i].supply = cs35l35_supplies[i];
1409
1410 cs35l35->num_supplies = ARRAY_SIZE(cs35l35_supplies);
1411
1412 ret = devm_regulator_bulk_get(dev, cs35l35->num_supplies,
1413 cs35l35->supplies);
1414 if (ret != 0) {
1415 dev_err(dev, "Failed to request core supplies: %d\n", ret);
1416 return ret;
1417 }
1418
1419 if (pdata) {
1420 cs35l35->pdata = *pdata;
1421 } else {
1422 pdata = devm_kzalloc(dev, sizeof(struct cs35l35_platform_data),
1423 GFP_KERNEL);
1424 if (!pdata)
1425 return -ENOMEM;
1426 if (i2c_client->dev.of_node) {
1427 ret = cs35l35_handle_of_data(i2c_client, pdata);
1428 if (ret != 0)
1429 return ret;
1430
1431 }
1432 cs35l35->pdata = *pdata;
1433 }
1434
1435 ret = regulator_bulk_enable(cs35l35->num_supplies,
1436 cs35l35->supplies);
1437 if (ret != 0) {
1438 dev_err(dev, "Failed to enable core supplies: %d\n", ret);
1439 return ret;
1440 }
1441
1442 /* returning NULL can be valid if in stereo mode */
1443 cs35l35->reset_gpio = devm_gpiod_get_optional(dev, "reset",
1444 GPIOD_OUT_LOW);
1445 if (IS_ERR(cs35l35->reset_gpio)) {
1446 ret = PTR_ERR(cs35l35->reset_gpio);
1447 cs35l35->reset_gpio = NULL;
1448 if (ret == -EBUSY) {
1449 dev_info(dev,
1450 "Reset line busy, assuming shared reset\n");
1451 } else {
1452 dev_err(dev, "Failed to get reset GPIO: %d\n", ret);
1453 goto err;
1454 }
1455 }
1456
1457 gpiod_set_value_cansleep(cs35l35->reset_gpio, 1);
1458
1459 init_completion(&cs35l35->pdn_done);
1460
1461 ret = devm_request_threaded_irq(dev, i2c_client->irq, NULL, cs35l35_irq,
1462 IRQF_ONESHOT | IRQF_TRIGGER_LOW |
1463 IRQF_SHARED, "cs35l35", cs35l35);
1464 if (ret != 0) {
1465 dev_err(dev, "Failed to request IRQ: %d\n", ret);
1466 goto err;
1467 }
1468 /* initialize codec */
1469 ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_AB, &reg);
1470
1471 devid = (reg & 0xFF) << 12;
1472 ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_CD, &reg);
1473 devid |= (reg & 0xFF) << 4;
1474 ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_E, &reg);
1475 devid |= (reg & 0xF0) >> 4;
1476
1477 if (devid != CS35L35_CHIP_ID) {
1478 dev_err(dev, "CS35L35 Device ID (%X). Expected ID %X\n",
1479 devid, CS35L35_CHIP_ID);
1480 ret = -ENODEV;
1481 goto err;
1482 }
1483
1484 ret = regmap_read(cs35l35->regmap, CS35L35_REV_ID, &reg);
1485 if (ret < 0) {
1486 dev_err(dev, "Get Revision ID failed: %d\n", ret);
1487 goto err;
1488 }
1489
1490 ret = regmap_register_patch(cs35l35->regmap, cs35l35_errata_patch,
1491 ARRAY_SIZE(cs35l35_errata_patch));
1492 if (ret < 0) {
1493 dev_err(dev, "Failed to apply errata patch: %d\n", ret);
1494 goto err;
1495 }
1496
1497 dev_info(dev, "Cirrus Logic CS35L35 (%x), Revision: %02X\n",
1498 devid, reg & 0xFF);
1499
1500 /* Set the INT Masks for critical errors */
1501 regmap_write(cs35l35->regmap, CS35L35_INT_MASK_1,
1502 CS35L35_INT1_CRIT_MASK);
1503 regmap_write(cs35l35->regmap, CS35L35_INT_MASK_2,
1504 CS35L35_INT2_CRIT_MASK);
1505 regmap_write(cs35l35->regmap, CS35L35_INT_MASK_3,
1506 CS35L35_INT3_CRIT_MASK);
1507 regmap_write(cs35l35->regmap, CS35L35_INT_MASK_4,
1508 CS35L35_INT4_CRIT_MASK);
1509
1510 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
1511 CS35L35_PWR2_PDN_MASK,
1512 CS35L35_PWR2_PDN_MASK);
1513
1514 if (cs35l35->pdata.bst_pdn_fet_on)
1515 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
1516 CS35L35_PDN_BST_MASK,
1517 1 << CS35L35_PDN_BST_FETON_SHIFT);
1518 else
1519 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2,
1520 CS35L35_PDN_BST_MASK,
1521 1 << CS35L35_PDN_BST_FETOFF_SHIFT);
1522
1523 regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL3,
1524 CS35L35_PWR3_PDN_MASK,
1525 CS35L35_PWR3_PDN_MASK);
1526
1527 regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL,
1528 CS35L35_AMP_MUTE_MASK, 1 << CS35L35_AMP_MUTE_SHIFT);
1529
1530 ret = snd_soc_register_codec(dev, &soc_codec_dev_cs35l35, cs35l35_dai,
1531 ARRAY_SIZE(cs35l35_dai));
1532 if (ret < 0) {
1533 dev_err(dev, "Failed to register codec: %d\n", ret);
1534 goto err;
1535 }
1536
1537 return 0;
1538
1539err:
1540 regulator_bulk_disable(cs35l35->num_supplies,
1541 cs35l35->supplies);
1542 gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
1543
1544 return ret;
1545}
1546
1547static int cs35l35_i2c_remove(struct i2c_client *client)
1548{
1549 snd_soc_unregister_codec(&client->dev);
1550 return 0;
1551}
1552
/* Device tree match table */
static const struct of_device_id cs35l35_of_match[] = {
	{.compatible = "cirrus,cs35l35"},
	{},
};
MODULE_DEVICE_TABLE(of, cs35l35_of_match);

/* Legacy I2C board-info match table */
static const struct i2c_device_id cs35l35_id[] = {
	{"cs35l35", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, cs35l35_id);
1565
/* I2C driver glue: probe/remove plus the OF and I2C id tables above */
static struct i2c_driver cs35l35_i2c_driver = {
	.driver = {
		.name = "cs35l35",
		.of_match_table = cs35l35_of_match,
	},
	.id_table = cs35l35_id,
	.probe = cs35l35_i2c_probe,
	.remove = cs35l35_i2c_remove,
};

module_i2c_driver(cs35l35_i2c_driver);

MODULE_DESCRIPTION("ASoC CS35L35 driver");
MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l35.h b/sound/soc/codecs/cs35l35.h
new file mode 100644
index 000000000000..5a6e43a87c4d
--- /dev/null
+++ b/sound/soc/codecs/cs35l35.h
@@ -0,0 +1,294 @@
1/*
2 * cs35l35.h -- CS35L35 ALSA SoC audio driver
3 *
4 * Copyright 2016 Cirrus Logic, Inc.
5 *
6 * Author: Brian Austin <brian.austin@cirrus.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#ifndef __CS35L35_H__
15#define __CS35L35_H__
16
17#define CS35L35_FIRSTREG 0x01
18#define CS35L35_LASTREG 0x7E
19#define CS35L35_CHIP_ID 0x00035A35
20#define CS35L35_DEVID_AB 0x01 /* Device ID A & B [RO] */
21#define CS35L35_DEVID_CD 0x02 /* Device ID C & D [RO] */
22#define CS35L35_DEVID_E 0x03 /* Device ID E [RO] */
23#define CS35L35_FAB_ID 0x04 /* Fab ID [RO] */
24#define CS35L35_REV_ID 0x05 /* Revision ID [RO] */
25#define CS35L35_PWRCTL1 0x06 /* Power Ctl 1 */
26#define CS35L35_PWRCTL2 0x07 /* Power Ctl 2 */
27#define CS35L35_PWRCTL3 0x08 /* Power Ctl 3 */
28#define CS35L35_CLK_CTL1 0x0A /* Clocking Ctl 1 */
29#define CS35L35_CLK_CTL2 0x0B /* Clocking Ctl 2 */
30#define CS35L35_CLK_CTL3 0x0C /* Clocking Ctl 3 */
31#define CS35L35_SP_FMT_CTL1 0x0D /* Serial Port Format CTL1 */
32#define CS35L35_SP_FMT_CTL2 0x0E /* Serial Port Format CTL2 */
33#define CS35L35_SP_FMT_CTL3 0x0F /* Serial Port Format CTL3 */
34#define CS35L35_MAG_COMP_CTL 0x13 /* Magnitude Comp CTL */
35#define CS35L35_AMP_INP_DRV_CTL 0x14 /* Amp Input Drive Ctl */
36#define CS35L35_AMP_DIG_VOL_CTL 0x15 /* Amplifier Dig Volume Ctl */
37#define CS35L35_AMP_DIG_VOL 0x16 /* Amplifier Dig Volume */
38#define CS35L35_ADV_DIG_VOL 0x17 /* Advisory Digital Volume */
39#define CS35L35_PROTECT_CTL 0x18 /* Amp Gain - Prot Ctl Param */
40#define CS35L35_AMP_GAIN_AUD_CTL 0x19 /* Amp Serial Port Gain Ctl */
41#define CS35L35_AMP_GAIN_PDM_CTL 0x1A /* Amplifier Gain PDM Ctl */
42#define CS35L35_AMP_GAIN_ADV_CTL 0x1B /* Amplifier Gain Ctl */
43#define CS35L35_GPI_CTL 0x1C /* GPI Ctl */
44#define CS35L35_BST_CVTR_V_CTL 0x1D /* Boost Conv Voltage Ctl */
45#define CS35L35_BST_PEAK_I 0x1E /* Boost Conv Peak Current */
46#define CS35L35_BST_RAMP_CTL 0x20 /* Boost Conv Soft Ramp Ctl */
47#define CS35L35_BST_CONV_COEF_1 0x21 /* Boost Conv Coefficients 1 */
48#define CS35L35_BST_CONV_COEF_2 0x22 /* Boost Conv Coefficients 2 */
49#define CS35L35_BST_CONV_SLOPE_COMP 0x23 /* Boost Conv Slope Comp */
50#define CS35L35_BST_CONV_SW_FREQ 0x24 /* Boost Conv L BST SW Freq */
51#define CS35L35_CLASS_H_CTL 0x30 /* CLS H Control */
52#define CS35L35_CLASS_H_HEADRM_CTL 0x31 /* CLS H Headroom Ctl */
53#define CS35L35_CLASS_H_RELEASE_RATE 0x32 /* CLS H Release Rate */
54#define CS35L35_CLASS_H_FET_DRIVE_CTL 0x33 /* CLS H Weak FET Drive Ctl */
55#define CS35L35_CLASS_H_VP_CTL 0x34 /* CLS H VP Ctl */
56#define CS35L35_CLASS_H_STATUS 0x38 /* CLS H Status */
57#define CS35L35_VPBR_CTL 0x3A /* VPBR Ctl */
58#define CS35L35_VPBR_VOL_CTL 0x3B /* VPBR Volume Ctl */
59#define CS35L35_VPBR_TIMING_CTL 0x3C /* VPBR Timing Ctl */
60#define CS35L35_VPBR_MODE_VOL_CTL 0x3D /* VPBR Mode/Attack Vol Ctl */
61#define CS35L35_VPBR_ATTEN_STATUS 0x4B /* VPBR Attenuation Status */
62#define CS35L35_SPKR_MON_CTL 0x4E /* Speaker Monitoring Ctl */
63#define CS35L35_IMON_SCALE_CTL 0x51 /* IMON Scale Ctl */
64#define CS35L35_AUDIN_RXLOC_CTL 0x52 /* Audio Input RX Loc Ctl */
65#define CS35L35_ADVIN_RXLOC_CTL 0x53 /* Advisory Input RX Loc Ctl */
66#define CS35L35_VMON_TXLOC_CTL 0x54 /* VMON TX Loc Ctl */
67#define CS35L35_IMON_TXLOC_CTL 0x55 /* IMON TX Loc Ctl */
68#define CS35L35_VPMON_TXLOC_CTL 0x56 /* VPMON TX Loc Ctl */
69#define CS35L35_VBSTMON_TXLOC_CTL 0x57 /* VBSTMON TX Loc Ctl */
70#define CS35L35_VPBR_STATUS_TXLOC_CTL 0x58 /* VPBR Status TX Loc Ctl */
71#define CS35L35_ZERO_FILL_LOC_CTL 0x59 /* Zero Fill Loc Ctl */
72#define CS35L35_AUDIN_DEPTH_CTL 0x5A /* Audio Input Depth Ctl */
73#define CS35L35_SPKMON_DEPTH_CTL 0x5B /* SPK Mon Output Depth Ctl */
74#define CS35L35_SUPMON_DEPTH_CTL 0x5C /* Supply Mon Out Depth Ctl */
75#define CS35L35_ZEROFILL_DEPTH_CTL 0x5D /* Zero Fill Mon Output Ctl */
76#define CS35L35_MULT_DEV_SYNCH1 0x62 /* Multidevice Synch */
77#define CS35L35_MULT_DEV_SYNCH2 0x63 /* Multidevice Synch 2 */
78#define CS35L35_PROT_RELEASE_CTL 0x64 /* Protection Release Ctl */
79#define CS35L35_DIAG_MODE_REG_LOCK 0x68 /* Diagnostic Mode Reg Lock */
80#define CS35L35_DIAG_MODE_CTL_1 0x69 /* Diagnostic Mode Ctl 1 */
81#define CS35L35_DIAG_MODE_CTL_2 0x6A /* Diagnostic Mode Ctl 2 */
82#define CS35L35_INT_MASK_1 0x70 /* Interrupt Mask 1 */
83#define CS35L35_INT_MASK_2 0x71 /* Interrupt Mask 2 */
84#define CS35L35_INT_MASK_3 0x72 /* Interrupt Mask 3 */
85#define CS35L35_INT_MASK_4 0x73 /* Interrupt Mask 4 */
86#define CS35L35_INT_STATUS_1 0x74 /* Interrupt Status 1 */
87#define CS35L35_INT_STATUS_2 0x75 /* Interrupt Status 2 */
88#define CS35L35_INT_STATUS_3 0x76 /* Interrupt Status 3 */
89#define CS35L35_INT_STATUS_4 0x77 /* Interrupt Status 4 */
90#define CS35L35_PLL_STATUS 0x78 /* PLL Status */
91#define CS35L35_OTP_TRIM_STATUS 0x7E /* OTP Trim Status */
92
93#define CS35L35_MAX_REGISTER 0x7F
94
95/* CS35L35_PWRCTL1 */
96#define CS35L35_SFT_RST 0x80
97#define CS35L35_DISCHG_FLT 0x02
98#define CS35L35_PDN_ALL 0x01
99
100/* CS35L35_PWRCTL2 */
101#define CS35L35_PDN_VMON 0x80
102#define CS35L35_PDN_IMON 0x40
103#define CS35L35_PDN_CLASSH 0x20
104#define CS35L35_PDN_VPBR 0x10
105#define CS35L35_PDN_BST 0x04
106#define CS35L35_PDN_AMP 0x01
107
108/* CS35L35_PWRCTL3 */
109#define CS35L35_PDN_VBSTMON_OUT 0x10
110#define CS35L35_PDN_VMON_OUT 0x08
111
112#define CS35L35_AUDIN_DEPTH_MASK 0x03
113#define CS35L35_AUDIN_DEPTH_SHIFT 0
114#define CS35L35_ADVIN_DEPTH_MASK 0x0C
115#define CS35L35_ADVIN_DEPTH_SHIFT 2
116#define CS35L35_SDIN_DEPTH_8 0x01
117#define CS35L35_SDIN_DEPTH_16 0x02
118#define CS35L35_SDIN_DEPTH_24 0x03
119
120#define CS35L35_SDOUT_DEPTH_8 0x01
121#define CS35L35_SDOUT_DEPTH_12 0x02
122#define CS35L35_SDOUT_DEPTH_16 0x03
123
124#define CS35L35_AUD_IN_LR_MASK 0x80
125#define CS35L35_AUD_IN_LR_SHIFT 7
126#define CS35L35_ADV_IN_LR_MASK 0x80
127#define CS35L35_ADV_IN_LR_SHIFT 7
128#define CS35L35_AUD_IN_LOC_MASK 0x0F
129#define CS35L35_AUD_IN_LOC_SHIFT 0
130#define CS35L35_ADV_IN_LOC_MASK 0x0F
131#define CS35L35_ADV_IN_LOC_SHIFT 0
132
133#define CS35L35_IMON_DEPTH_MASK 0x03
134#define CS35L35_IMON_DEPTH_SHIFT 0
135#define CS35L35_VMON_DEPTH_MASK 0x0C
136#define CS35L35_VMON_DEPTH_SHIFT 2
137#define CS35L35_VBSTMON_DEPTH_MASK 0x03
138#define CS35L35_VBSTMON_DEPTH_SHIFT 0
139#define CS35L35_VPMON_DEPTH_MASK 0x0C
140#define CS35L35_VPMON_DEPTH_SHIFT 2
141#define CS35L35_VPBRSTAT_DEPTH_MASK 0x30
142#define CS35L35_VPBRSTAT_DEPTH_SHIFT 4
143#define CS35L35_ZEROFILL_DEPTH_MASK 0x03
144#define CS35L35_ZEROFILL_DEPTH_SHIFT 0x00
145
146#define CS35L35_MON_TXLOC_MASK 0x3F
147#define CS35L35_MON_TXLOC_SHIFT 0
148#define CS35L35_MON_FRM_MASK 0x80
149#define CS35L35_MON_FRM_SHIFT 7
150
151#define CS35L35_IMON_SCALE_MASK 0xF8
152#define CS35L35_IMON_SCALE_SHIFT 3
153
154#define CS35L35_MS_MASK 0x80
155#define CS35L35_MS_SHIFT 7
156#define CS35L35_SPMODE_MASK 0x40
157#define CS35L35_SP_DRV_MASK 0x10
158#define CS35L35_SP_DRV_SHIFT 4
159#define CS35L35_CLK_CTL2_MASK 0xFF
160#define CS35L35_PDM_MODE_MASK 0x40
161#define CS35L35_PDM_MODE_SHIFT 6
162#define CS35L35_CLK_SOURCE_MASK 0x03
163#define CS35L35_CLK_SOURCE_SHIFT 0
164#define CS35L35_CLK_SOURCE_MCLK 0
165#define CS35L35_CLK_SOURCE_SCLK 1
166#define CS35L35_CLK_SOURCE_PDM 2
167
168#define CS35L35_SP_SCLKS_MASK 0x0F
169#define CS35L35_SP_SCLKS_SHIFT 0x00
170#define CS35L35_SP_SCLKS_16FS 0x03
171#define CS35L35_SP_SCLKS_32FS 0x07
172#define CS35L35_SP_SCLKS_48FS 0x0B
173#define CS35L35_SP_SCLKS_64FS 0x0F
174#define CS35L35_SP_RATE_MASK 0xC0
175
176#define CS35L35_PDN_BST_MASK 0x06
177#define CS35L35_PDN_BST_FETON_SHIFT 1
178#define CS35L35_PDN_BST_FETOFF_SHIFT 2
179#define CS35L35_PWR2_PDN_MASK 0xE0
180#define CS35L35_PWR3_PDN_MASK 0x1E
181#define CS35L35_PDN_ALL_MASK 0x01
182#define CS35L35_DISCHG_FILT_MASK 0x02
183#define CS35L35_DISCHG_FILT_SHIFT 1
184#define CS35L35_MCLK_DIS_MASK 0x04
185#define CS35L35_MCLK_DIS_SHIFT 2
186
187#define CS35L35_BST_CTL_MASK 0x7F
188#define CS35L35_BST_CTL_SHIFT 0
189#define CS35L35_BST_IPK_MASK 0x1F
190#define CS35L35_BST_IPK_SHIFT 0
191#define CS35L35_AMP_MUTE_MASK 0x20
192#define CS35L35_AMP_MUTE_SHIFT 5
193#define CS35L35_AMP_GAIN_ZC_MASK 0x10
194#define CS35L35_AMP_GAIN_ZC_SHIFT 4
195
196#define CS35L35_AMP_DIGSFT_MASK 0x02
197#define CS35L35_AMP_DIGSFT_SHIFT 1
198
199/* CS35L35_SP_FMT_CTL3 */
200#define CS35L35_SP_I2S_DRV_MASK 0x03
201#define CS35L35_SP_I2S_DRV_SHIFT 0
202
203/* Class H Algorithm Control */
204#define CS35L35_CH_STEREO_MASK 0x40
205#define CS35L35_CH_STEREO_SHIFT 6
206#define CS35L35_CH_BST_OVR_MASK 0x04
207#define CS35L35_CH_BST_OVR_SHIFT 2
208#define CS35L35_CH_BST_LIM_MASK 0x08
209#define CS35L35_CH_BST_LIM_SHIFT 3
210#define CS35L35_CH_MEM_DEPTH_MASK 0x01
211#define CS35L35_CH_MEM_DEPTH_SHIFT 0
212#define CS35L35_CH_HDRM_CTL_MASK 0x3F
213#define CS35L35_CH_HDRM_CTL_SHIFT 0
214#define CS35L35_CH_REL_RATE_MASK 0xFF
215#define CS35L35_CH_REL_RATE_SHIFT 0
216#define CS35L35_CH_WKFET_DIS_MASK 0x80
217#define CS35L35_CH_WKFET_DIS_SHIFT 7
218#define CS35L35_CH_WKFET_DEL_MASK 0x70
219#define CS35L35_CH_WKFET_DEL_SHIFT 4
220#define CS35L35_CH_WKFET_THLD_MASK 0x0F
221#define CS35L35_CH_WKFET_THLD_SHIFT 0
222#define CS35L35_CH_VP_AUTO_MASK 0x80
223#define CS35L35_CH_VP_AUTO_SHIFT 7
224#define CS35L35_CH_VP_RATE_MASK 0x60
225#define CS35L35_CH_VP_RATE_SHIFT 5
226#define CS35L35_CH_VP_MAN_MASK 0x1F
227#define CS35L35_CH_VP_MAN_SHIFT 0
228
229/* CS35L35_PROT_RELEASE_CTL */
230#define CS35L35_CAL_ERR_RLS 0x80
231#define CS35L35_SHORT_RLS 0x04
232#define CS35L35_OTW_RLS 0x02
233#define CS35L35_OTE_RLS 0x01
234
235/* INT Mask Registers */
236#define CS35L35_INT1_CRIT_MASK 0x38
237#define CS35L35_INT2_CRIT_MASK 0xEF
238#define CS35L35_INT3_CRIT_MASK 0xEE
239#define CS35L35_INT4_CRIT_MASK 0xFF
240
241/* PDN DONE Masks */
242#define CS35L35_M_PDN_DONE_SHIFT 4
243#define CS35L35_M_PDN_DONE_MASK 0x10
244
245/* CS35L35_INT_1 */
246#define CS35L35_CAL_ERR 0x80
247#define CS35L35_OTP_ERR 0x40
248#define CS35L35_LRCLK_ERR 0x20
249#define CS35L35_SPCLK_ERR 0x10
250#define CS35L35_MCLK_ERR 0x08
251#define CS35L35_AMP_SHORT 0x04
252#define CS35L35_OTW 0x02
253#define CS35L35_OTE 0x01
254
255/* CS35L35_INT_2 */
256#define CS35L35_PDN_DONE 0x10
257#define CS35L35_VPBR_ERR 0x02
258#define CS35L35_VPBR_CLR 0x01
259
260/* CS35L35_INT_3 */
261#define CS35L35_BST_HIGH 0x10
262#define CS35L35_BST_HIGH_FLAG 0x08
263#define CS35L35_BST_IPK_FLAG 0x04
264#define CS35L35_LBST_SHORT 0x01
265
266/* CS35L35_INT_4 */
267#define CS35L35_VMON_OVFL 0x08
268#define CS35L35_IMON_OVFL 0x04
269
270#define CS35L35_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | \
271 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
272
/* Per-device driver state, allocated in cs35l35_i2c_probe() */
struct cs35l35_private {
	struct device *dev;			/* the I2C client's device */
	struct cs35l35_platform_data pdata;	/* copy of pdata or parsed DT */
	struct regmap *regmap;
	struct regulator_bulk_data supplies[2];	/* VA and VP (cs35l35_supplies) */
	int num_supplies;
	int sysclk;				/* set by DAI callbacks - confirm */
	int sclk;
	bool pdm_mode;
	bool i2s_mode;
	bool slave_mode;
	/* GPIO for /RST */
	struct gpio_desc *reset_gpio;
	/* init'ed at probe; presumably completed by cs35l35_irq on PDN_DONE */
	struct completion pdn_done;
};
288
/* Supply names requested/enabled by the probe path, in supplies[] order */
static const char * const cs35l35_supplies[] = {
	"VA",
	"VP",
};
293
294#endif
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 8c0f3b89b5bc..e78b5f055f25 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -498,7 +498,7 @@ static int cs4271_reset(struct snd_soc_codec *codec)
498 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); 498 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
499 499
500 if (gpio_is_valid(cs4271->gpio_nreset)) { 500 if (gpio_is_valid(cs4271->gpio_nreset)) {
501 gpio_set_value(cs4271->gpio_nreset, 0); 501 gpio_direction_output(cs4271->gpio_nreset, 0);
502 mdelay(1); 502 mdelay(1);
503 gpio_set_value(cs4271->gpio_nreset, 1); 503 gpio_set_value(cs4271->gpio_nreset, 1);
504 mdelay(1); 504 mdelay(1);
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index cb47fb595ff4..1e0d5973b758 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -1130,6 +1130,7 @@ MODULE_DEVICE_TABLE(i2c, cs53l30_id);
1130static struct i2c_driver cs53l30_i2c_driver = { 1130static struct i2c_driver cs53l30_i2c_driver = {
1131 .driver = { 1131 .driver = {
1132 .name = "cs53l30", 1132 .name = "cs53l30",
1133 .of_match_table = cs53l30_of_match,
1133 .pm = &cs53l30_runtime_pm, 1134 .pm = &cs53l30_runtime_pm,
1134 }, 1135 },
1135 .id_table = cs53l30_id, 1136 .id_table = cs53l30_id,
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 12da55882c06..6dd7578f0bb8 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -12,6 +12,7 @@
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14 14
15#include <linux/acpi.h>
15#include <linux/clk.h> 16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/i2c.h> 18#include <linux/i2c.h>
@@ -1528,12 +1529,23 @@ static int da7213_set_bias_level(struct snd_soc_codec *codec,
1528 return 0; 1529 return 0;
1529} 1530}
1530 1531
1532#if defined(CONFIG_OF)
1531/* DT */ 1533/* DT */
1532static const struct of_device_id da7213_of_match[] = { 1534static const struct of_device_id da7213_of_match[] = {
1533 { .compatible = "dlg,da7213", }, 1535 { .compatible = "dlg,da7213", },
1534 { } 1536 { }
1535}; 1537};
1536MODULE_DEVICE_TABLE(of, da7213_of_match); 1538MODULE_DEVICE_TABLE(of, da7213_of_match);
1539#endif
1540
1541#ifdef CONFIG_ACPI
1542static const struct acpi_device_id da7213_acpi_match[] = {
1543 { "DLGS7212", 0},
1544 { "DLGS7213", 0},
1545 { },
1546};
1547MODULE_DEVICE_TABLE(acpi, da7213_acpi_match);
1548#endif
1537 1549
1538static enum da7213_micbias_voltage 1550static enum da7213_micbias_voltage
1539 da7213_of_micbias_lvl(struct snd_soc_codec *codec, u32 val) 1551 da7213_of_micbias_lvl(struct snd_soc_codec *codec, u32 val)
@@ -1844,6 +1856,7 @@ static struct i2c_driver da7213_i2c_driver = {
1844 .driver = { 1856 .driver = {
1845 .name = "da7213", 1857 .name = "da7213",
1846 .of_match_table = of_match_ptr(da7213_of_match), 1858 .of_match_table = of_match_ptr(da7213_of_match),
1859 .acpi_match_table = ACPI_PTR(da7213_acpi_match),
1847 }, 1860 },
1848 .probe = da7213_i2c_probe, 1861 .probe = da7213_i2c_probe,
1849 .remove = da7213_remove, 1862 .remove = da7213_remove,
diff --git a/sound/soc/codecs/dio2125.c b/sound/soc/codecs/dio2125.c
new file mode 100644
index 000000000000..09451cd44f9b
--- /dev/null
+++ b/sound/soc/codecs/dio2125.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2017 BayLibre, SAS.
3 * Author: Jerome Brunet <jbrunet@baylibre.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 * The full GNU General Public License is included in this distribution
17 * in the file called COPYING.
18 */
19
20#include <linux/gpio/consumer.h>
21#include <linux/module.h>
22#include <sound/soc.h>
23
24#define DRV_NAME "dio2125"
25
/* Per-device state: just the amp "enable" GPIO obtained in dio2125_probe() */
struct dio2125 {
	struct gpio_desc *gpiod_enable;
};
29
30static int drv_event(struct snd_soc_dapm_widget *w,
31 struct snd_kcontrol *control, int event)
32{
33 struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
34 struct dio2125 *priv = snd_soc_component_get_drvdata(c);
35 int val;
36
37 switch (event) {
38 case SND_SOC_DAPM_POST_PMU:
39 val = 1;
40 break;
41 case SND_SOC_DAPM_PRE_PMD:
42 val = 0;
43 break;
44 default:
45 WARN(1, "Unexpected event");
46 return -EINVAL;
47 }
48
49 gpiod_set_value_cansleep(priv->gpiod_enable, val);
50
51 return 0;
52}
53
/*
 * DAPM topology: two input pins feed a single driver stage whose
 * power-up/down toggles the enable GPIO via drv_event(), feeding both
 * output pins.
 */
static const struct snd_soc_dapm_widget dio2125_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("INL"),
	SND_SOC_DAPM_INPUT("INR"),
	SND_SOC_DAPM_OUT_DRV_E("DRV", SND_SOC_NOPM, 0, 0, NULL, 0, drv_event,
			       (SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)),
	SND_SOC_DAPM_OUTPUT("OUTL"),
	SND_SOC_DAPM_OUTPUT("OUTR"),
};

static const struct snd_soc_dapm_route dio2125_dapm_routes[] = {
	{ "DRV", NULL, "INL" },
	{ "DRV", NULL, "INR" },
	{ "OUTL", NULL, "DRV" },
	{ "OUTR", NULL, "DRV" },
};

/* Component driver: DAPM tables only, no controls or DAIs */
static const struct snd_soc_component_driver dio2125_component_driver = {
	.dapm_widgets = dio2125_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(dio2125_dapm_widgets),
	.dapm_routes = dio2125_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(dio2125_dapm_routes),
};
76
77static int dio2125_probe(struct platform_device *pdev)
78{
79 struct device *dev = &pdev->dev;
80 struct dio2125 *priv;
81 int err;
82
83 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
84 if (priv == NULL)
85 return -ENOMEM;
86 platform_set_drvdata(pdev, priv);
87
88 priv->gpiod_enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
89 if (IS_ERR(priv->gpiod_enable)) {
90 err = PTR_ERR(priv->gpiod_enable);
91 if (err != -EPROBE_DEFER)
92 dev_err(dev, "Failed to get 'enable' gpio: %d", err);
93 return err;
94 }
95
96 return devm_snd_soc_register_component(dev, &dio2125_component_driver,
97 NULL, 0);
98}
99
/* Device tree match table (driver is DT-only in practice) */
#ifdef CONFIG_OF
static const struct of_device_id dio2125_ids[] = {
	{ .compatible = "dioo,dio2125", },
	{ }
};
MODULE_DEVICE_TABLE(of, dio2125_ids);
#endif

static struct platform_driver dio2125_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(dio2125_ids),
	},
	.probe = dio2125_probe,
};

module_platform_driver(dio2125_driver);

MODULE_DESCRIPTION("ASoC DIO2125 output driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/es7134.c b/sound/soc/codecs/es7134.c
new file mode 100644
index 000000000000..25ede825d349
--- /dev/null
+++ b/sound/soc/codecs/es7134.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright (c) 2017 BayLibre, SAS.
3 * Author: Jerome Brunet <jbrunet@baylibre.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 * The full GNU General Public License is included in this distribution
17 * in the file called COPYING.
18 */
19
20#include <linux/module.h>
21#include <sound/soc.h>
22
/*
 * The Everest ES7134 is a very simple DA converter with no registers.
 */
26
27static int es7134_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
28{
29 fmt &= (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_INV_MASK |
30 SND_SOC_DAIFMT_MASTER_MASK);
31
32 if (fmt != (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
33 SND_SOC_DAIFMT_CBS_CFS)) {
34 dev_err(codec_dai->dev, "Invalid DAI format\n");
35 return -EINVAL;
36 }
37
38 return 0;
39}
40
static const struct snd_soc_dai_ops es7134_dai_ops = {
	.set_fmt = es7134_set_fmt,
};

/* Playback-only DAI: stereo, 8 kHz - 192 kHz, 16/18/20/24-bit */
static struct snd_soc_dai_driver es7134_dai = {
	.name = "es7134-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000,
		.formats = (SNDRV_PCM_FMTBIT_S16_LE |
			    SNDRV_PCM_FMTBIT_S18_3LE |
			    SNDRV_PCM_FMTBIT_S20_3LE |
			    SNDRV_PCM_FMTBIT_S24_3LE |
			    SNDRV_PCM_FMTBIT_S24_LE),
	},
	.ops = &es7134_dai_ops,
};

/* Single DAC widget routed straight to the two analogue outputs */
static const struct snd_soc_dapm_widget es7134_dapm_widgets[] = {
	SND_SOC_DAPM_OUTPUT("AOUTL"),
	SND_SOC_DAPM_OUTPUT("AOUTR"),
	SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
};

static const struct snd_soc_dapm_route es7134_dapm_routes[] = {
	{ "AOUTL", NULL, "DAC" },
	{ "AOUTR", NULL, "DAC" },
};

/* Codec driver: DAPM tables only, no controls (chip has no registers) */
static struct snd_soc_codec_driver es7134_codec_driver = {
	.component_driver = {
		.dapm_widgets = es7134_dapm_widgets,
		.num_dapm_widgets = ARRAY_SIZE(es7134_dapm_widgets),
		.dapm_routes = es7134_dapm_routes,
		.num_dapm_routes = ARRAY_SIZE(es7134_dapm_routes),
	},
};
80
/* Platform probe: no hardware setup needed, just register the codec */
static int es7134_probe(struct platform_device *pdev)
{
	return snd_soc_register_codec(&pdev->dev,
				      &es7134_codec_driver,
				      &es7134_dai, 1);
}
87
/* Platform remove: undo the snd_soc_register_codec() from probe */
static int es7134_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}
93
/* DT match: the register-less ES7144 is handled identically */
#ifdef CONFIG_OF
static const struct of_device_id es7134_ids[] = {
	{ .compatible = "everest,es7134", },
	{ .compatible = "everest,es7144", },
	{ }
};
MODULE_DEVICE_TABLE(of, es7134_ids);
#endif

static struct platform_driver es7134_driver = {
	.driver = {
		.name = "es7134",
		.of_match_table = of_match_ptr(es7134_ids),
	},
	.probe = es7134_probe,
	.remove = es7134_remove,
};

module_platform_driver(es7134_driver);

MODULE_DESCRIPTION("ASoC ES7134 audio codec driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 3f84fbd071e2..ed7cc42d1ee2 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -69,14 +69,10 @@ static const char * const supply_names[ES8328_SUPPLY_NUM] = {
69 "HPVDD", 69 "HPVDD",
70}; 70};
71 71
72#define ES8328_RATES (SNDRV_PCM_RATE_96000 | \ 72#define ES8328_RATES (SNDRV_PCM_RATE_192000 | \
73 SNDRV_PCM_RATE_48000 | \ 73 SNDRV_PCM_RATE_96000 | \
74 SNDRV_PCM_RATE_44100 | \ 74 SNDRV_PCM_RATE_88200 | \
75 SNDRV_PCM_RATE_32000 | \ 75 SNDRV_PCM_RATE_8000_48000)
76 SNDRV_PCM_RATE_22050 | \
77 SNDRV_PCM_RATE_16000 | \
78 SNDRV_PCM_RATE_11025 | \
79 SNDRV_PCM_RATE_8000)
80#define ES8328_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ 76#define ES8328_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
81 SNDRV_PCM_FMTBIT_S18_3LE | \ 77 SNDRV_PCM_FMTBIT_S18_3LE | \
82 SNDRV_PCM_FMTBIT_S20_3LE | \ 78 SNDRV_PCM_FMTBIT_S20_3LE | \
@@ -91,6 +87,7 @@ struct es8328_priv {
91 int mclkdiv2; 87 int mclkdiv2;
92 const struct snd_pcm_hw_constraint_list *sysclk_constraints; 88 const struct snd_pcm_hw_constraint_list *sysclk_constraints;
93 const int *mclk_ratios; 89 const int *mclk_ratios;
90 bool master;
94 struct regulator_bulk_data supplies[ES8328_SUPPLY_NUM]; 91 struct regulator_bulk_data supplies[ES8328_SUPPLY_NUM];
95}; 92};
96 93
@@ -469,7 +466,7 @@ static int es8328_startup(struct snd_pcm_substream *substream,
469 struct snd_soc_codec *codec = dai->codec; 466 struct snd_soc_codec *codec = dai->codec;
470 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); 467 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
471 468
472 if (es8328->sysclk_constraints) 469 if (es8328->master && es8328->sysclk_constraints)
473 snd_pcm_hw_constraint_list(substream->runtime, 0, 470 snd_pcm_hw_constraint_list(substream->runtime, 0,
474 SNDRV_PCM_HW_PARAM_RATE, 471 SNDRV_PCM_HW_PARAM_RATE,
475 es8328->sysclk_constraints); 472 es8328->sysclk_constraints);
@@ -488,27 +485,34 @@ static int es8328_hw_params(struct snd_pcm_substream *substream,
488 int wl; 485 int wl;
489 int ratio; 486 int ratio;
490 487
491 if (!es8328->sysclk_constraints) {
492 dev_err(codec->dev, "No MCLK configured\n");
493 return -EINVAL;
494 }
495
496 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 488 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
497 reg = ES8328_DACCONTROL2; 489 reg = ES8328_DACCONTROL2;
498 else 490 else
499 reg = ES8328_ADCCONTROL5; 491 reg = ES8328_ADCCONTROL5;
500 492
501 for (i = 0; i < es8328->sysclk_constraints->count; i++) 493 if (es8328->master) {
502 if (es8328->sysclk_constraints->list[i] == params_rate(params)) 494 if (!es8328->sysclk_constraints) {
503 break; 495 dev_err(codec->dev, "No MCLK configured\n");
496 return -EINVAL;
497 }
504 498
505 if (i == es8328->sysclk_constraints->count) { 499 for (i = 0; i < es8328->sysclk_constraints->count; i++)
506 dev_err(codec->dev, "LRCLK %d unsupported with current clock\n", 500 if (es8328->sysclk_constraints->list[i] ==
507 params_rate(params)); 501 params_rate(params))
508 return -EINVAL; 502 break;
503
504 if (i == es8328->sysclk_constraints->count) {
505 dev_err(codec->dev,
506 "LRCLK %d unsupported with current clock\n",
507 params_rate(params));
508 return -EINVAL;
509 }
510 ratio = es8328->mclk_ratios[i];
511 } else {
512 ratio = 0;
513 es8328->mclkdiv2 = 0;
509 } 514 }
510 515
511 ratio = es8328->mclk_ratios[i];
512 snd_soc_update_bits(codec, ES8328_MASTERMODE, 516 snd_soc_update_bits(codec, ES8328_MASTERMODE,
513 ES8328_MASTERMODE_MCLKDIV2, 517 ES8328_MASTERMODE_MCLKDIV2,
514 es8328->mclkdiv2 ? ES8328_MASTERMODE_MCLKDIV2 : 0); 518 es8328->mclkdiv2 ? ES8328_MASTERMODE_MCLKDIV2 : 0);
@@ -586,6 +590,7 @@ static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai,
586 unsigned int fmt) 590 unsigned int fmt)
587{ 591{
588 struct snd_soc_codec *codec = codec_dai->codec; 592 struct snd_soc_codec *codec = codec_dai->codec;
593 struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
589 u8 dac_mode = 0; 594 u8 dac_mode = 0;
590 u8 adc_mode = 0; 595 u8 adc_mode = 0;
591 596
@@ -595,11 +600,13 @@ static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai,
595 snd_soc_update_bits(codec, ES8328_MASTERMODE, 600 snd_soc_update_bits(codec, ES8328_MASTERMODE,
596 ES8328_MASTERMODE_MSC, 601 ES8328_MASTERMODE_MSC,
597 ES8328_MASTERMODE_MSC); 602 ES8328_MASTERMODE_MSC);
603 es8328->master = true;
598 break; 604 break;
599 case SND_SOC_DAIFMT_CBS_CFS: 605 case SND_SOC_DAIFMT_CBS_CFS:
600 /* Slave serial port mode */ 606 /* Slave serial port mode */
601 snd_soc_update_bits(codec, ES8328_MASTERMODE, 607 snd_soc_update_bits(codec, ES8328_MASTERMODE,
602 ES8328_MASTERMODE_MSC, 0); 608 ES8328_MASTERMODE_MSC, 0);
609 es8328->master = false;
603 break; 610 break;
604 default: 611 default:
605 return -EINVAL; 612 return -EINVAL;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index fd272a40485b..bc2e74ff3b2d 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -469,7 +469,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
469 469
470 format = snd_hdac_calc_stream_format(params_rate(hparams), 470 format = snd_hdac_calc_stream_format(params_rate(hparams),
471 params_channels(hparams), params_format(hparams), 471 params_channels(hparams), params_format(hparams),
472 24, 0); 472 dai->driver->playback.sig_bits, 0);
473 473
474 pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, dai_map->cvt); 474 pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, dai_map->cvt);
475 if (!pcm) 475 if (!pcm)
@@ -1419,8 +1419,8 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
1419 hdmi_dais[i].playback.rate_min = rate_min; 1419 hdmi_dais[i].playback.rate_min = rate_min;
1420 hdmi_dais[i].playback.channels_min = 2; 1420 hdmi_dais[i].playback.channels_min = 2;
1421 hdmi_dais[i].playback.channels_max = 2; 1421 hdmi_dais[i].playback.channels_max = 2;
1422 hdmi_dais[i].playback.sig_bits = bps;
1422 hdmi_dais[i].ops = &hdmi_dai_ops; 1423 hdmi_dais[i].ops = &hdmi_dai_ops;
1423
1424 i++; 1424 i++;
1425 } 1425 }
1426 1426
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 6cdf15ab46de..0247edc9c84e 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -516,13 +516,13 @@ static const struct i2c_device_id max9867_i2c_id[] = {
516 { "max9867", 0 }, 516 { "max9867", 0 },
517 { } 517 { }
518}; 518};
519MODULE_DEVICE_TABLE(i2c, max9867_i2c_id);
519 520
520static const struct of_device_id max9867_of_match[] = { 521static const struct of_device_id max9867_of_match[] = {
521 { .compatible = "maxim,max9867", }, 522 { .compatible = "maxim,max9867", },
522 { } 523 { }
523}; 524};
524 525MODULE_DEVICE_TABLE(of, max9867_of_match);
525MODULE_DEVICE_TABLE(i2c, max9867_i2c_id);
526 526
527static const struct dev_pm_ops max9867_pm_ops = { 527static const struct dev_pm_ops max9867_pm_ops = {
528 SET_SYSTEM_SLEEP_PM_OPS(max9867_suspend, max9867_resume) 528 SET_SYSTEM_SLEEP_PM_OPS(max9867_suspend, max9867_resume)
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
new file mode 100644
index 000000000000..b5ee29499e16
--- /dev/null
+++ b/sound/soc/codecs/max98927.c
@@ -0,0 +1,841 @@
1/*
2 * max98927.c -- MAX98927 ALSA Soc Audio driver
3 *
4 * Copyright (C) 2016 Maxim Integrated Products
5 * Author: Ryan Lee <ryans.lee@maximintegrated.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/acpi.h>
14#include <linux/i2c.h>
15#include <linux/module.h>
16#include <linux/regmap.h>
17#include <linux/slab.h>
18#include <linux/cdev.h>
19#include <sound/pcm.h>
20#include <sound/pcm_params.h>
21#include <sound/soc.h>
22#include <linux/gpio.h>
23#include <linux/of_gpio.h>
24#include <sound/tlv.h>
25#include "max98927.h"
26
27static struct reg_default max98927_reg[] = {
28 {MAX98927_R0001_INT_RAW1, 0x00},
29 {MAX98927_R0002_INT_RAW2, 0x00},
30 {MAX98927_R0003_INT_RAW3, 0x00},
31 {MAX98927_R0004_INT_STATE1, 0x00},
32 {MAX98927_R0005_INT_STATE2, 0x00},
33 {MAX98927_R0006_INT_STATE3, 0x00},
34 {MAX98927_R0007_INT_FLAG1, 0x00},
35 {MAX98927_R0008_INT_FLAG2, 0x00},
36 {MAX98927_R0009_INT_FLAG3, 0x00},
37 {MAX98927_R000A_INT_EN1, 0x00},
38 {MAX98927_R000B_INT_EN2, 0x00},
39 {MAX98927_R000C_INT_EN3, 0x00},
40 {MAX98927_R000D_INT_FLAG_CLR1, 0x00},
41 {MAX98927_R000E_INT_FLAG_CLR2, 0x00},
42 {MAX98927_R000F_INT_FLAG_CLR3, 0x00},
43 {MAX98927_R0010_IRQ_CTRL, 0x00},
44 {MAX98927_R0011_CLK_MON, 0x00},
45 {MAX98927_R0012_WDOG_CTRL, 0x00},
46 {MAX98927_R0013_WDOG_RST, 0x00},
47 {MAX98927_R0014_MEAS_ADC_THERM_WARN_THRESH, 0x00},
48 {MAX98927_R0015_MEAS_ADC_THERM_SHDN_THRESH, 0x00},
49 {MAX98927_R0016_MEAS_ADC_THERM_HYSTERESIS, 0x00},
50 {MAX98927_R0017_PIN_CFG, 0x55},
51 {MAX98927_R0018_PCM_RX_EN_A, 0x00},
52 {MAX98927_R0019_PCM_RX_EN_B, 0x00},
53 {MAX98927_R001A_PCM_TX_EN_A, 0x00},
54 {MAX98927_R001B_PCM_TX_EN_B, 0x00},
55 {MAX98927_R001C_PCM_TX_HIZ_CTRL_A, 0x00},
56 {MAX98927_R001D_PCM_TX_HIZ_CTRL_B, 0x00},
57 {MAX98927_R001E_PCM_TX_CH_SRC_A, 0x00},
58 {MAX98927_R001F_PCM_TX_CH_SRC_B, 0x00},
59 {MAX98927_R0020_PCM_MODE_CFG, 0x40},
60 {MAX98927_R0021_PCM_MASTER_MODE, 0x00},
61 {MAX98927_R0022_PCM_CLK_SETUP, 0x22},
62 {MAX98927_R0023_PCM_SR_SETUP1, 0x00},
63 {MAX98927_R0024_PCM_SR_SETUP2, 0x00},
64 {MAX98927_R0025_PCM_TO_SPK_MONOMIX_A, 0x00},
65 {MAX98927_R0026_PCM_TO_SPK_MONOMIX_B, 0x00},
66 {MAX98927_R0027_ICC_RX_EN_A, 0x00},
67 {MAX98927_R0028_ICC_RX_EN_B, 0x00},
68 {MAX98927_R002B_ICC_TX_EN_A, 0x00},
69 {MAX98927_R002C_ICC_TX_EN_B, 0x00},
70 {MAX98927_R002E_ICC_HIZ_MANUAL_MODE, 0x00},
71 {MAX98927_R002F_ICC_TX_HIZ_EN_A, 0x00},
72 {MAX98927_R0030_ICC_TX_HIZ_EN_B, 0x00},
73 {MAX98927_R0031_ICC_LNK_EN, 0x00},
74 {MAX98927_R0032_PDM_TX_EN, 0x00},
75 {MAX98927_R0033_PDM_TX_HIZ_CTRL, 0x00},
76 {MAX98927_R0034_PDM_TX_CTRL, 0x00},
77 {MAX98927_R0035_PDM_RX_CTRL, 0x00},
78 {MAX98927_R0036_AMP_VOL_CTRL, 0x00},
79 {MAX98927_R0037_AMP_DSP_CFG, 0x02},
80 {MAX98927_R0038_TONE_GEN_DC_CFG, 0x00},
81 {MAX98927_R0039_DRE_CTRL, 0x01},
82 {MAX98927_R003A_AMP_EN, 0x00},
83 {MAX98927_R003B_SPK_SRC_SEL, 0x00},
84 {MAX98927_R003C_SPK_GAIN, 0x00},
85 {MAX98927_R003D_SSM_CFG, 0x01},
86 {MAX98927_R003E_MEAS_EN, 0x00},
87 {MAX98927_R003F_MEAS_DSP_CFG, 0x04},
88 {MAX98927_R0040_BOOST_CTRL0, 0x00},
89 {MAX98927_R0041_BOOST_CTRL3, 0x00},
90 {MAX98927_R0042_BOOST_CTRL1, 0x00},
91 {MAX98927_R0043_MEAS_ADC_CFG, 0x00},
92 {MAX98927_R0044_MEAS_ADC_BASE_MSB, 0x00},
93 {MAX98927_R0045_MEAS_ADC_BASE_LSB, 0x00},
94 {MAX98927_R0046_ADC_CH0_DIVIDE, 0x00},
95 {MAX98927_R0047_ADC_CH1_DIVIDE, 0x00},
96 {MAX98927_R0048_ADC_CH2_DIVIDE, 0x00},
97 {MAX98927_R0049_ADC_CH0_FILT_CFG, 0x00},
98 {MAX98927_R004A_ADC_CH1_FILT_CFG, 0x00},
99 {MAX98927_R004B_ADC_CH2_FILT_CFG, 0x00},
100 {MAX98927_R004C_MEAS_ADC_CH0_READ, 0x00},
101 {MAX98927_R004D_MEAS_ADC_CH1_READ, 0x00},
102 {MAX98927_R004E_MEAS_ADC_CH2_READ, 0x00},
103 {MAX98927_R0051_BROWNOUT_STATUS, 0x00},
104 {MAX98927_R0052_BROWNOUT_EN, 0x00},
105 {MAX98927_R0053_BROWNOUT_INFINITE_HOLD, 0x00},
106 {MAX98927_R0054_BROWNOUT_INFINITE_HOLD_CLR, 0x00},
107 {MAX98927_R0055_BROWNOUT_LVL_HOLD, 0x00},
108 {MAX98927_R005A_BROWNOUT_LVL1_THRESH, 0x00},
109 {MAX98927_R005B_BROWNOUT_LVL2_THRESH, 0x00},
110 {MAX98927_R005C_BROWNOUT_LVL3_THRESH, 0x00},
111 {MAX98927_R005D_BROWNOUT_LVL4_THRESH, 0x00},
112 {MAX98927_R005E_BROWNOUT_THRESH_HYSTERYSIS, 0x00},
113 {MAX98927_R005F_BROWNOUT_AMP_LIMITER_ATK_REL, 0x00},
114 {MAX98927_R0060_BROWNOUT_AMP_GAIN_ATK_REL, 0x00},
115 {MAX98927_R0061_BROWNOUT_AMP1_CLIP_MODE, 0x00},
116 {MAX98927_R0072_BROWNOUT_LVL1_CUR_LIMIT, 0x00},
117 {MAX98927_R0073_BROWNOUT_LVL1_AMP1_CTRL1, 0x00},
118 {MAX98927_R0074_BROWNOUT_LVL1_AMP1_CTRL2, 0x00},
119 {MAX98927_R0075_BROWNOUT_LVL1_AMP1_CTRL3, 0x00},
120 {MAX98927_R0076_BROWNOUT_LVL2_CUR_LIMIT, 0x00},
121 {MAX98927_R0077_BROWNOUT_LVL2_AMP1_CTRL1, 0x00},
122 {MAX98927_R0078_BROWNOUT_LVL2_AMP1_CTRL2, 0x00},
123 {MAX98927_R0079_BROWNOUT_LVL2_AMP1_CTRL3, 0x00},
124 {MAX98927_R007A_BROWNOUT_LVL3_CUR_LIMIT, 0x00},
125 {MAX98927_R007B_BROWNOUT_LVL3_AMP1_CTRL1, 0x00},
126 {MAX98927_R007C_BROWNOUT_LVL3_AMP1_CTRL2, 0x00},
127 {MAX98927_R007D_BROWNOUT_LVL3_AMP1_CTRL3, 0x00},
128 {MAX98927_R007E_BROWNOUT_LVL4_CUR_LIMIT, 0x00},
129 {MAX98927_R007F_BROWNOUT_LVL4_AMP1_CTRL1, 0x00},
130 {MAX98927_R0080_BROWNOUT_LVL4_AMP1_CTRL2, 0x00},
131 {MAX98927_R0081_BROWNOUT_LVL4_AMP1_CTRL3, 0x00},
132 {MAX98927_R0082_ENV_TRACK_VOUT_HEADROOM, 0x00},
133 {MAX98927_R0083_ENV_TRACK_BOOST_VOUT_DELAY, 0x00},
134 {MAX98927_R0084_ENV_TRACK_REL_RATE, 0x00},
135 {MAX98927_R0085_ENV_TRACK_HOLD_RATE, 0x00},
136 {MAX98927_R0086_ENV_TRACK_CTRL, 0x00},
137 {MAX98927_R0087_ENV_TRACK_BOOST_VOUT_READ, 0x00},
138 {MAX98927_R00FF_GLOBAL_SHDN, 0x00},
139 {MAX98927_R0100_SOFT_RESET, 0x00},
140 {MAX98927_R01FF_REV_ID, 0x40},
141};
142
143static int max98927_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
144{
145 struct snd_soc_codec *codec = codec_dai->codec;
146 struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
147 unsigned int mode = 0;
148 unsigned int format = 0;
149 unsigned int invert = 0;
150
151 dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
152
153 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
154 case SND_SOC_DAIFMT_CBS_CFS:
155 mode = MAX98927_PCM_MASTER_MODE_SLAVE;
156 break;
157 case SND_SOC_DAIFMT_CBM_CFM:
158 max98927->master = true;
159 mode = MAX98927_PCM_MASTER_MODE_MASTER;
160 break;
161 default:
162 dev_err(codec->dev, "DAI clock mode unsupported");
163 return -EINVAL;
164 }
165
166 regmap_update_bits(max98927->regmap,
167 MAX98927_R0021_PCM_MASTER_MODE,
168 MAX98927_PCM_MASTER_MODE_MASK,
169 mode);
170
171 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
172 case SND_SOC_DAIFMT_NB_NF:
173 break;
174 case SND_SOC_DAIFMT_IB_NF:
175 invert = MAX98927_PCM_MODE_CFG_PCM_BCLKEDGE;
176 break;
177 default:
178 dev_err(codec->dev, "DAI invert mode unsupported");
179 return -EINVAL;
180 }
181
182 regmap_update_bits(max98927->regmap,
183 MAX98927_R0020_PCM_MODE_CFG,
184 MAX98927_PCM_MODE_CFG_PCM_BCLKEDGE,
185 invert);
186
187 /* interface format */
188 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
189 case SND_SOC_DAIFMT_I2S:
190 max98927->iface |= SND_SOC_DAIFMT_I2S;
191 format = MAX98927_PCM_FORMAT_I2S;
192 break;
193 case SND_SOC_DAIFMT_LEFT_J:
194 max98927->iface |= SND_SOC_DAIFMT_LEFT_J;
195 format = MAX98927_PCM_FORMAT_LJ;
196 break;
197 case SND_SOC_DAIFMT_PDM:
198 max98927->iface |= SND_SOC_DAIFMT_PDM;
199 break;
200 default:
201 return -EINVAL;
202 }
203
204 /* pcm channel configuration */
205 if (max98927->iface & (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_LEFT_J)) {
206 regmap_update_bits(max98927->regmap,
207 MAX98927_R0018_PCM_RX_EN_A,
208 MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN,
209 MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN);
210
211 regmap_update_bits(max98927->regmap,
212 MAX98927_R0020_PCM_MODE_CFG,
213 MAX98927_PCM_MODE_CFG_FORMAT_MASK,
214 format << MAX98927_PCM_MODE_CFG_FORMAT_SHIFT);
215
216 regmap_update_bits(max98927->regmap,
217 MAX98927_R003B_SPK_SRC_SEL,
218 MAX98927_SPK_SRC_MASK, 0);
219
220 } else
221 regmap_update_bits(max98927->regmap,
222 MAX98927_R0018_PCM_RX_EN_A,
223 MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN, 0);
224
225 /* pdm channel configuration */
226 if (max98927->iface & SND_SOC_DAIFMT_PDM) {
227 regmap_update_bits(max98927->regmap,
228 MAX98927_R0035_PDM_RX_CTRL,
229 MAX98927_PDM_RX_EN_MASK, 1);
230
231 regmap_update_bits(max98927->regmap,
232 MAX98927_R003B_SPK_SRC_SEL,
233 MAX98927_SPK_SRC_MASK, 3);
234 } else
235 regmap_update_bits(max98927->regmap,
236 MAX98927_R0035_PDM_RX_CTRL,
237 MAX98927_PDM_RX_EN_MASK, 0);
238 return 0;
239}
240
241/* codec MCLK rate in master mode */
242static const int rate_table[] = {
243 5644800, 6000000, 6144000, 6500000,
244 9600000, 11289600, 12000000, 12288000,
245 13000000, 19200000,
246};
247
248static int max98927_set_clock(struct max98927_priv *max98927,
249 struct snd_pcm_hw_params *params)
250{
251 struct snd_soc_codec *codec = max98927->codec;
252 /* BCLK/LRCLK ratio calculation */
253 int blr_clk_ratio = params_channels(params) * max98927->ch_size;
254 int value;
255
256 if (max98927->master) {
257 int i;
258 /* match rate to closest value */
259 for (i = 0; i < ARRAY_SIZE(rate_table); i++) {
260 if (rate_table[i] >= max98927->sysclk)
261 break;
262 }
263 if (i == ARRAY_SIZE(rate_table)) {
264 dev_err(codec->dev, "failed to find proper clock rate.\n");
265 return -EINVAL;
266 }
267 regmap_update_bits(max98927->regmap,
268 MAX98927_R0021_PCM_MASTER_MODE,
269 MAX98927_PCM_MASTER_MODE_MCLK_MASK,
270 i << MAX98927_PCM_MASTER_MODE_MCLK_RATE_SHIFT);
271 }
272
273 switch (blr_clk_ratio) {
274 case 32:
275 value = 2;
276 break;
277 case 48:
278 value = 3;
279 break;
280 case 64:
281 value = 4;
282 break;
283 default:
284 return -EINVAL;
285 }
286 regmap_update_bits(max98927->regmap,
287 MAX98927_R0022_PCM_CLK_SETUP,
288 MAX98927_PCM_CLK_SETUP_BSEL_MASK,
289 value);
290 return 0;
291}
292
293static int max98927_dai_hw_params(struct snd_pcm_substream *substream,
294 struct snd_pcm_hw_params *params,
295 struct snd_soc_dai *dai)
296{
297 struct snd_soc_codec *codec = dai->codec;
298 struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
299 unsigned int sampling_rate = 0;
300 unsigned int chan_sz = 0;
301
302 /* pcm mode configuration */
303 switch (snd_pcm_format_width(params_format(params))) {
304 case 16:
305 chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_16;
306 break;
307 case 24:
308 chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_24;
309 break;
310 case 32:
311 chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_32;
312 break;
313 default:
314 dev_err(codec->dev, "format unsupported %d",
315 params_format(params));
316 goto err;
317 }
318
319 max98927->ch_size = snd_pcm_format_width(params_format(params));
320
321 regmap_update_bits(max98927->regmap,
322 MAX98927_R0020_PCM_MODE_CFG,
323 MAX98927_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
324
325 dev_dbg(codec->dev, "format supported %d",
326 params_format(params));
327
328 /* sampling rate configuration */
329 switch (params_rate(params)) {
330 case 8000:
331 sampling_rate = MAX98927_PCM_SR_SET1_SR_8000;
332 break;
333 case 11025:
334 sampling_rate = MAX98927_PCM_SR_SET1_SR_11025;
335 break;
336 case 12000:
337 sampling_rate = MAX98927_PCM_SR_SET1_SR_12000;
338 break;
339 case 16000:
340 sampling_rate = MAX98927_PCM_SR_SET1_SR_16000;
341 break;
342 case 22050:
343 sampling_rate = MAX98927_PCM_SR_SET1_SR_22050;
344 break;
345 case 24000:
346 sampling_rate = MAX98927_PCM_SR_SET1_SR_24000;
347 break;
348 case 32000:
349 sampling_rate = MAX98927_PCM_SR_SET1_SR_32000;
350 break;
351 case 44100:
352 sampling_rate = MAX98927_PCM_SR_SET1_SR_44100;
353 break;
354 case 48000:
355 sampling_rate = MAX98927_PCM_SR_SET1_SR_48000;
356 break;
357 default:
358 dev_err(codec->dev, "rate %d not supported\n",
359 params_rate(params));
360 goto err;
361 }
362 /* set DAI_SR to correct LRCLK frequency */
363 regmap_update_bits(max98927->regmap,
364 MAX98927_R0023_PCM_SR_SETUP1,
365 MAX98927_PCM_SR_SET1_SR_MASK,
366 sampling_rate);
367 regmap_update_bits(max98927->regmap,
368 MAX98927_R0024_PCM_SR_SETUP2,
369 MAX98927_PCM_SR_SET2_SR_MASK,
370 sampling_rate << MAX98927_PCM_SR_SET2_SR_SHIFT);
371
372 /* set sampling rate of IV */
373 if (max98927->interleave_mode &&
374 sampling_rate > MAX98927_PCM_SR_SET1_SR_16000)
375 regmap_update_bits(max98927->regmap,
376 MAX98927_R0024_PCM_SR_SETUP2,
377 MAX98927_PCM_SR_SET2_IVADC_SR_MASK,
378 sampling_rate - 3);
379 else
380 regmap_update_bits(max98927->regmap,
381 MAX98927_R0024_PCM_SR_SETUP2,
382 MAX98927_PCM_SR_SET2_IVADC_SR_MASK,
383 sampling_rate);
384 return max98927_set_clock(max98927, params);
385err:
386 return -EINVAL;
387}
388
389#define MAX98927_RATES SNDRV_PCM_RATE_8000_48000
390
391#define MAX98927_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
392 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
393
394static int max98927_dai_set_sysclk(struct snd_soc_dai *dai,
395 int clk_id, unsigned int freq, int dir)
396{
397 struct snd_soc_codec *codec = dai->codec;
398 struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
399
400 max98927->sysclk = freq;
401 return 0;
402}
403
404static const struct snd_soc_dai_ops max98927_dai_ops = {
405 .set_sysclk = max98927_dai_set_sysclk,
406 .set_fmt = max98927_dai_set_fmt,
407 .hw_params = max98927_dai_hw_params,
408};
409
410static int max98927_dac_event(struct snd_soc_dapm_widget *w,
411 struct snd_kcontrol *kcontrol, int event)
412{
413 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
414 struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
415
416 switch (event) {
417 case SND_SOC_DAPM_POST_PMU:
418 regmap_update_bits(max98927->regmap,
419 MAX98927_R003A_AMP_EN,
420 MAX98927_AMP_EN_MASK, 1);
421 /* enable VMON and IMON */
422 regmap_update_bits(max98927->regmap,
423 MAX98927_R003E_MEAS_EN,
424 MAX98927_MEAS_V_EN | MAX98927_MEAS_I_EN,
425 MAX98927_MEAS_V_EN | MAX98927_MEAS_I_EN);
426 regmap_update_bits(max98927->regmap,
427 MAX98927_R00FF_GLOBAL_SHDN,
428 MAX98927_GLOBAL_EN_MASK, 1);
429 break;
430 case SND_SOC_DAPM_POST_PMD:
431 regmap_update_bits(max98927->regmap,
432 MAX98927_R00FF_GLOBAL_SHDN,
433 MAX98927_GLOBAL_EN_MASK, 0);
434 regmap_update_bits(max98927->regmap,
435 MAX98927_R003A_AMP_EN,
436 MAX98927_AMP_EN_MASK, 0);
437 /* disable VMON and IMON */
438 regmap_update_bits(max98927->regmap,
439 MAX98927_R003E_MEAS_EN,
440 MAX98927_MEAS_V_EN | MAX98927_MEAS_I_EN, 0);
441 break;
442 default:
443 return 0;
444 }
445 return 0;
446}
447
448static const char * const max98927_switch_text[] = {
449 "Left", "Right", "LeftRight"};
450
451static const struct soc_enum dai_sel_enum =
452 SOC_ENUM_SINGLE(MAX98927_R0025_PCM_TO_SPK_MONOMIX_A,
453 MAX98927_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
454 3, max98927_switch_text);
455
456static const struct snd_kcontrol_new max98927_dai_controls =
457 SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
458
459static const struct snd_soc_dapm_widget max98927_dapm_widgets[] = {
460 SND_SOC_DAPM_AIF_IN("DAI_OUT", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
461 SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback", MAX98927_R003A_AMP_EN,
462 0, 0, max98927_dac_event,
463 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
464 SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
465 &max98927_dai_controls),
466 SND_SOC_DAPM_OUTPUT("BE_OUT"),
467};
468
469static DECLARE_TLV_DB_SCALE(max98927_spk_tlv, 300, 300, 0);
470static DECLARE_TLV_DB_SCALE(max98927_digital_tlv, -1600, 25, 0);
471
472static bool max98927_readable_register(struct device *dev, unsigned int reg)
473{
474 switch (reg) {
475 case MAX98927_R0001_INT_RAW1 ... MAX98927_R0028_ICC_RX_EN_B:
476 case MAX98927_R002B_ICC_TX_EN_A ... MAX98927_R002C_ICC_TX_EN_B:
477 case MAX98927_R002E_ICC_HIZ_MANUAL_MODE
478 ... MAX98927_R004E_MEAS_ADC_CH2_READ:
479 case MAX98927_R0051_BROWNOUT_STATUS
480 ... MAX98927_R0055_BROWNOUT_LVL_HOLD:
481 case MAX98927_R005A_BROWNOUT_LVL1_THRESH
482 ... MAX98927_R0061_BROWNOUT_AMP1_CLIP_MODE:
483 case MAX98927_R0072_BROWNOUT_LVL1_CUR_LIMIT
484 ... MAX98927_R0087_ENV_TRACK_BOOST_VOUT_READ:
485 case MAX98927_R00FF_GLOBAL_SHDN:
486 case MAX98927_R0100_SOFT_RESET:
487 case MAX98927_R01FF_REV_ID:
488 return true;
489 default:
490 return false;
491 }
492};
493
494static bool max98927_volatile_reg(struct device *dev, unsigned int reg)
495{
496 switch (reg) {
497 case MAX98927_R0001_INT_RAW1 ... MAX98927_R0009_INT_FLAG3:
498 return true;
499 default:
500 return false;
501 }
502}
503
504static const char * const max98927_boost_voltage_text[] = {
505 "6.5V", "6.625V", "6.75V", "6.875V", "7V", "7.125V", "7.25V", "7.375V",
506 "7.5V", "7.625V", "7.75V", "7.875V", "8V", "8.125V", "8.25V", "8.375V",
507 "8.5V", "8.625V", "8.75V", "8.875V", "9V", "9.125V", "9.25V", "9.375V",
508 "9.5V", "9.625V", "9.75V", "9.875V", "10V"
509};
510
511static SOC_ENUM_SINGLE_DECL(max98927_boost_voltage,
512 MAX98927_R0040_BOOST_CTRL0, 0,
513 max98927_boost_voltage_text);
514
515static const char * const max98927_current_limit_text[] = {
516 "1.00A", "1.10A", "1.20A", "1.30A", "1.40A", "1.50A", "1.60A", "1.70A",
517 "1.80A", "1.90A", "2.00A", "2.10A", "2.20A", "2.30A", "2.40A", "2.50A",
518 "2.60A", "2.70A", "2.80A", "2.90A", "3.00A", "3.10A", "3.20A", "3.30A",
519 "3.40A", "3.50A", "3.60A", "3.70A", "3.80A", "3.90A", "4.00A", "4.10A"
520};
521
522static SOC_ENUM_SINGLE_DECL(max98927_current_limit,
523 MAX98927_R0042_BOOST_CTRL1, 1,
524 max98927_current_limit_text);
525
526static const struct snd_kcontrol_new max98927_snd_controls[] = {
527 SOC_SINGLE_TLV("Speaker Volume", MAX98927_R003C_SPK_GAIN,
528 0, 6, 0,
529 max98927_spk_tlv),
530 SOC_SINGLE_TLV("Digital Volume", MAX98927_R0036_AMP_VOL_CTRL,
531 0, (1<<MAX98927_AMP_VOL_WIDTH)-1, 0,
532 max98927_digital_tlv),
533 SOC_SINGLE("Amp DSP Switch", MAX98927_R0052_BROWNOUT_EN,
534 MAX98927_BROWNOUT_DSP_SHIFT, 1, 0),
535 SOC_SINGLE("Ramp Switch", MAX98927_R0037_AMP_DSP_CFG,
536 MAX98927_AMP_DSP_CFG_RMP_SHIFT, 1, 0),
537 SOC_SINGLE("DRE Switch", MAX98927_R0039_DRE_CTRL,
538 MAX98927_DRE_EN_SHIFT, 1, 0),
539 SOC_SINGLE("Volume Location Switch", MAX98927_R0036_AMP_VOL_CTRL,
540 MAX98927_AMP_VOL_SEL_SHIFT, 1, 0),
541 SOC_ENUM("Boost Output Voltage", max98927_boost_voltage),
542 SOC_ENUM("Current Limit", max98927_current_limit),
543};
544
545static const struct snd_soc_dapm_route max98927_audio_map[] = {
546 {"Amp Enable", NULL, "DAI_OUT"},
547 {"DAI Sel Mux", "Left", "Amp Enable"},
548 {"DAI Sel Mux", "Right", "Amp Enable"},
549 {"DAI Sel Mux", "LeftRight", "Amp Enable"},
550 {"BE_OUT", NULL, "DAI Sel Mux"},
551};
552
553static struct snd_soc_dai_driver max98927_dai[] = {
554 {
555 .name = "max98927-aif1",
556 .playback = {
557 .stream_name = "HiFi Playback",
558 .channels_min = 1,
559 .channels_max = 2,
560 .rates = MAX98927_RATES,
561 .formats = MAX98927_FORMATS,
562 },
563 .capture = {
564 .stream_name = "HiFi Capture",
565 .channels_min = 1,
566 .channels_max = 2,
567 .rates = MAX98927_RATES,
568 .formats = MAX98927_FORMATS,
569 },
570 .ops = &max98927_dai_ops,
571 }
572};
573
574static int max98927_probe(struct snd_soc_codec *codec)
575{
576 struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
577
578 max98927->codec = codec;
579 codec->control_data = max98927->regmap;
580 codec->cache_bypass = 1;
581
582 /* Software Reset */
583 regmap_write(max98927->regmap,
584 MAX98927_R0100_SOFT_RESET, MAX98927_SOFT_RESET);
585
586 /* IV default slot configuration */
587 regmap_write(max98927->regmap,
588 MAX98927_R001C_PCM_TX_HIZ_CTRL_A,
589 0xFF);
590 regmap_write(max98927->regmap,
591 MAX98927_R001D_PCM_TX_HIZ_CTRL_B,
592 0xFF);
593 regmap_write(max98927->regmap,
594 MAX98927_R0025_PCM_TO_SPK_MONOMIX_A,
595 0x80);
596 regmap_write(max98927->regmap,
597 MAX98927_R0026_PCM_TO_SPK_MONOMIX_B,
598 0x1);
599 /* Set inital volume (+13dB) */
600 regmap_write(max98927->regmap,
601 MAX98927_R0036_AMP_VOL_CTRL,
602 0x38);
603 regmap_write(max98927->regmap,
604 MAX98927_R003C_SPK_GAIN,
605 0x05);
606 /* Enable DC blocker */
607 regmap_write(max98927->regmap,
608 MAX98927_R0037_AMP_DSP_CFG,
609 0x03);
610 /* Enable IMON VMON DC blocker */
611 regmap_write(max98927->regmap,
612 MAX98927_R003F_MEAS_DSP_CFG,
613 0xF7);
614 /* Boost Output Voltage & Current limit */
615 regmap_write(max98927->regmap,
616 MAX98927_R0040_BOOST_CTRL0,
617 0x1C);
618 regmap_write(max98927->regmap,
619 MAX98927_R0042_BOOST_CTRL1,
620 0x3E);
621 /* Measurement ADC config */
622 regmap_write(max98927->regmap,
623 MAX98927_R0043_MEAS_ADC_CFG,
624 0x04);
625 regmap_write(max98927->regmap,
626 MAX98927_R0044_MEAS_ADC_BASE_MSB,
627 0x00);
628 regmap_write(max98927->regmap,
629 MAX98927_R0045_MEAS_ADC_BASE_LSB,
630 0x24);
631 /* Brownout Level */
632 regmap_write(max98927->regmap,
633 MAX98927_R007F_BROWNOUT_LVL4_AMP1_CTRL1,
634 0x06);
635 /* Envelope Tracking configuration */
636 regmap_write(max98927->regmap,
637 MAX98927_R0082_ENV_TRACK_VOUT_HEADROOM,
638 0x08);
639 regmap_write(max98927->regmap,
640 MAX98927_R0086_ENV_TRACK_CTRL,
641 0x01);
642 regmap_write(max98927->regmap,
643 MAX98927_R0087_ENV_TRACK_BOOST_VOUT_READ,
644 0x10);
645
646 /* voltage, current slot configuration */
647 regmap_write(max98927->regmap,
648 MAX98927_R001E_PCM_TX_CH_SRC_A,
649 (max98927->i_l_slot<<MAX98927_PCM_TX_CH_SRC_A_I_SHIFT|
650 max98927->v_l_slot)&0xFF);
651
652 if (max98927->v_l_slot < 8) {
653 regmap_update_bits(max98927->regmap,
654 MAX98927_R001C_PCM_TX_HIZ_CTRL_A,
655 1 << max98927->v_l_slot, 0);
656 regmap_update_bits(max98927->regmap,
657 MAX98927_R001A_PCM_TX_EN_A,
658 1 << max98927->v_l_slot,
659 1 << max98927->v_l_slot);
660 } else {
661 regmap_update_bits(max98927->regmap,
662 MAX98927_R001D_PCM_TX_HIZ_CTRL_B,
663 1 << (max98927->v_l_slot - 8), 0);
664 regmap_update_bits(max98927->regmap,
665 MAX98927_R001B_PCM_TX_EN_B,
666 1 << (max98927->v_l_slot - 8),
667 1 << (max98927->v_l_slot - 8));
668 }
669
670 if (max98927->i_l_slot < 8) {
671 regmap_update_bits(max98927->regmap,
672 MAX98927_R001C_PCM_TX_HIZ_CTRL_A,
673 1 << max98927->i_l_slot, 0);
674 regmap_update_bits(max98927->regmap,
675 MAX98927_R001A_PCM_TX_EN_A,
676 1 << max98927->i_l_slot,
677 1 << max98927->i_l_slot);
678 } else {
679 regmap_update_bits(max98927->regmap,
680 MAX98927_R001D_PCM_TX_HIZ_CTRL_B,
681 1 << (max98927->i_l_slot - 8), 0);
682 regmap_update_bits(max98927->regmap,
683 MAX98927_R001B_PCM_TX_EN_B,
684 1 << (max98927->i_l_slot - 8),
685 1 << (max98927->i_l_slot - 8));
686 }
687
688 /* Set interleave mode */
689 if (max98927->interleave_mode)
690 regmap_update_bits(max98927->regmap,
691 MAX98927_R001F_PCM_TX_CH_SRC_B,
692 MAX98927_PCM_TX_CH_INTERLEAVE_MASK,
693 MAX98927_PCM_TX_CH_INTERLEAVE_MASK);
694 return 0;
695}
696
697static const struct snd_soc_codec_driver soc_codec_dev_max98927 = {
698 .probe = max98927_probe,
699 .component_driver = {
700 .controls = max98927_snd_controls,
701 .num_controls = ARRAY_SIZE(max98927_snd_controls),
702 .dapm_widgets = max98927_dapm_widgets,
703 .num_dapm_widgets = ARRAY_SIZE(max98927_dapm_widgets),
704 .dapm_routes = max98927_audio_map,
705 .num_dapm_routes = ARRAY_SIZE(max98927_audio_map),
706 },
707};
708
709static const struct regmap_config max98927_regmap = {
710 .reg_bits = 16,
711 .val_bits = 8,
712 .max_register = MAX98927_R01FF_REV_ID,
713 .reg_defaults = max98927_reg,
714 .num_reg_defaults = ARRAY_SIZE(max98927_reg),
715 .readable_reg = max98927_readable_register,
716 .volatile_reg = max98927_volatile_reg,
717 .cache_type = REGCACHE_RBTREE,
718};
719
720static void max98927_slot_config(struct i2c_client *i2c,
721 struct max98927_priv *max98927)
722{
723 int value;
724
725 if (!of_property_read_u32(i2c->dev.of_node,
726 "vmon-slot-no", &value))
727 max98927->v_l_slot = value & 0xF;
728 else
729 max98927->v_l_slot = 0;
730 if (!of_property_read_u32(i2c->dev.of_node,
731 "imon-slot-no", &value))
732 max98927->i_l_slot = value & 0xF;
733 else
734 max98927->i_l_slot = 1;
735}
736
737static int max98927_i2c_probe(struct i2c_client *i2c,
738 const struct i2c_device_id *id)
739{
740
741 int ret = 0, value;
742 int reg = 0;
743 struct max98927_priv *max98927 = NULL;
744
745 max98927 = devm_kzalloc(&i2c->dev,
746 sizeof(*max98927), GFP_KERNEL);
747
748 if (!max98927) {
749 ret = -ENOMEM;
750 return ret;
751 }
752 i2c_set_clientdata(i2c, max98927);
753
754 /* update interleave mode info */
755 if (!of_property_read_u32(i2c->dev.of_node,
756 "interleave_mode", &value)) {
757 if (value > 0)
758 max98927->interleave_mode = 1;
759 else
760 max98927->interleave_mode = 0;
761 } else
762 max98927->interleave_mode = 0;
763
764 /* regmap initialization */
765 max98927->regmap
766 = devm_regmap_init_i2c(i2c, &max98927_regmap);
767 if (IS_ERR(max98927->regmap)) {
768 ret = PTR_ERR(max98927->regmap);
769 dev_err(&i2c->dev,
770 "Failed to allocate regmap: %d\n", ret);
771 return ret;
772 }
773
774 /* Check Revision ID */
775 ret = regmap_read(max98927->regmap,
776 MAX98927_R01FF_REV_ID, &reg);
777 if (ret < 0) {
778 dev_err(&i2c->dev,
779 "Failed to read: 0x%02X\n", MAX98927_R01FF_REV_ID);
780 return ret;
781 }
782 dev_info(&i2c->dev, "MAX98927 revisionID: 0x%02X\n", reg);
783
784 /* voltage/current slot configuration */
785 max98927_slot_config(i2c, max98927);
786
787 /* codec registeration */
788 ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98927,
789 max98927_dai, ARRAY_SIZE(max98927_dai));
790 if (ret < 0)
791 dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
792
793 return ret;
794}
795
796static int max98927_i2c_remove(struct i2c_client *client)
797{
798 snd_soc_unregister_codec(&client->dev);
799 return 0;
800}
801
802static const struct i2c_device_id max98927_i2c_id[] = {
803 { "max98927", 0},
804 { },
805};
806
807MODULE_DEVICE_TABLE(i2c, max98927_i2c_id);
808
809#if defined(CONFIG_OF)
810static const struct of_device_id max98927_of_match[] = {
811 { .compatible = "maxim,max98927", },
812 { }
813};
814MODULE_DEVICE_TABLE(of, max98927_of_match);
815#endif
816
817#ifdef CONFIG_ACPI
818static const struct acpi_device_id max98927_acpi_match[] = {
819 { "MX98927", 0 },
820 {},
821};
822MODULE_DEVICE_TABLE(acpi, max98927_acpi_match);
823#endif
824
825static struct i2c_driver max98927_i2c_driver = {
826 .driver = {
827 .name = "max98927",
828 .of_match_table = of_match_ptr(max98927_of_match),
829 .acpi_match_table = ACPI_PTR(max98927_acpi_match),
830 .pm = NULL,
831 },
832 .probe = max98927_i2c_probe,
833 .remove = max98927_i2c_remove,
834 .id_table = max98927_i2c_id,
835};
836
837module_i2c_driver(max98927_i2c_driver)
838
839MODULE_DESCRIPTION("ALSA SoC MAX98927 driver");
840MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
841MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98927.h b/sound/soc/codecs/max98927.h
new file mode 100644
index 000000000000..ece6a608cbe1
--- /dev/null
+++ b/sound/soc/codecs/max98927.h
@@ -0,0 +1,272 @@
1/*
2 * max98927.h -- MAX98927 ALSA SoC Audio driver
3 *
4 * Copyright 2013-15 Maxim Integrated Products
5 * Author: Ryan Lee <ryans.lee@maximintegrated.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13#ifndef _MAX98927_H
14#define _MAX98927_H
15
16/* Register Values */
17#define MAX98927_R0001_INT_RAW1 0x0001
18#define MAX98927_R0002_INT_RAW2 0x0002
19#define MAX98927_R0003_INT_RAW3 0x0003
20#define MAX98927_R0004_INT_STATE1 0x0004
21#define MAX98927_R0005_INT_STATE2 0x0005
22#define MAX98927_R0006_INT_STATE3 0x0006
23#define MAX98927_R0007_INT_FLAG1 0x0007
24#define MAX98927_R0008_INT_FLAG2 0x0008
25#define MAX98927_R0009_INT_FLAG3 0x0009
26#define MAX98927_R000A_INT_EN1 0x000A
27#define MAX98927_R000B_INT_EN2 0x000B
28#define MAX98927_R000C_INT_EN3 0x000C
29#define MAX98927_R000D_INT_FLAG_CLR1 0x000D
30#define MAX98927_R000E_INT_FLAG_CLR2 0x000E
31#define MAX98927_R000F_INT_FLAG_CLR3 0x000F
32#define MAX98927_R0010_IRQ_CTRL 0x0010
33#define MAX98927_R0011_CLK_MON 0x0011
34#define MAX98927_R0012_WDOG_CTRL 0x0012
35#define MAX98927_R0013_WDOG_RST 0x0013
36#define MAX98927_R0014_MEAS_ADC_THERM_WARN_THRESH 0x0014
37#define MAX98927_R0015_MEAS_ADC_THERM_SHDN_THRESH 0x0015
38#define MAX98927_R0016_MEAS_ADC_THERM_HYSTERESIS 0x0016
39#define MAX98927_R0017_PIN_CFG 0x0017
40#define MAX98927_R0018_PCM_RX_EN_A 0x0018
41#define MAX98927_R0019_PCM_RX_EN_B 0x0019
42#define MAX98927_R001A_PCM_TX_EN_A 0x001A
43#define MAX98927_R001B_PCM_TX_EN_B 0x001B
44#define MAX98927_R001C_PCM_TX_HIZ_CTRL_A 0x001C
45#define MAX98927_R001D_PCM_TX_HIZ_CTRL_B 0x001D
46#define MAX98927_R001E_PCM_TX_CH_SRC_A 0x001E
47#define MAX98927_R001F_PCM_TX_CH_SRC_B 0x001F
48#define MAX98927_R0020_PCM_MODE_CFG 0x0020
49#define MAX98927_R0021_PCM_MASTER_MODE 0x0021
50#define MAX98927_R0022_PCM_CLK_SETUP 0x0022
51#define MAX98927_R0023_PCM_SR_SETUP1 0x0023
52#define MAX98927_R0024_PCM_SR_SETUP2 0x0024
53#define MAX98927_R0025_PCM_TO_SPK_MONOMIX_A 0x0025
54#define MAX98927_R0026_PCM_TO_SPK_MONOMIX_B 0x0026
55#define MAX98927_R0027_ICC_RX_EN_A 0x0027
56#define MAX98927_R0028_ICC_RX_EN_B 0x0028
57#define MAX98927_R002B_ICC_TX_EN_A 0x002B
58#define MAX98927_R002C_ICC_TX_EN_B 0x002C
59#define MAX98927_R002E_ICC_HIZ_MANUAL_MODE 0x002E
60#define MAX98927_R002F_ICC_TX_HIZ_EN_A 0x002F
61#define MAX98927_R0030_ICC_TX_HIZ_EN_B 0x0030
62#define MAX98927_R0031_ICC_LNK_EN 0x0031
63#define MAX98927_R0032_PDM_TX_EN 0x0032
64#define MAX98927_R0033_PDM_TX_HIZ_CTRL 0x0033
65#define MAX98927_R0034_PDM_TX_CTRL 0x0034
66#define MAX98927_R0035_PDM_RX_CTRL 0x0035
67#define MAX98927_R0036_AMP_VOL_CTRL 0x0036
68#define MAX98927_R0037_AMP_DSP_CFG 0x0037
69#define MAX98927_R0038_TONE_GEN_DC_CFG 0x0038
70#define MAX98927_R0039_DRE_CTRL 0x0039
71#define MAX98927_R003A_AMP_EN 0x003A
72#define MAX98927_R003B_SPK_SRC_SEL 0x003B
73#define MAX98927_R003C_SPK_GAIN 0x003C
74#define MAX98927_R003D_SSM_CFG 0x003D
75#define MAX98927_R003E_MEAS_EN 0x003E
76#define MAX98927_R003F_MEAS_DSP_CFG 0x003F
77#define MAX98927_R0040_BOOST_CTRL0 0x0040
78#define MAX98927_R0041_BOOST_CTRL3 0x0041
79#define MAX98927_R0042_BOOST_CTRL1 0x0042
80#define MAX98927_R0043_MEAS_ADC_CFG 0x0043
81#define MAX98927_R0044_MEAS_ADC_BASE_MSB 0x0044
82#define MAX98927_R0045_MEAS_ADC_BASE_LSB 0x0045
83#define MAX98927_R0046_ADC_CH0_DIVIDE 0x0046
84#define MAX98927_R0047_ADC_CH1_DIVIDE 0x0047
85#define MAX98927_R0048_ADC_CH2_DIVIDE 0x0048
86#define MAX98927_R0049_ADC_CH0_FILT_CFG 0x0049
87#define MAX98927_R004A_ADC_CH1_FILT_CFG 0x004A
88#define MAX98927_R004B_ADC_CH2_FILT_CFG 0x004B
89#define MAX98927_R004C_MEAS_ADC_CH0_READ 0x004C
90#define MAX98927_R004D_MEAS_ADC_CH1_READ 0x004D
91#define MAX98927_R004E_MEAS_ADC_CH2_READ 0x004E
92#define MAX98927_R0051_BROWNOUT_STATUS 0x0051
93#define MAX98927_R0052_BROWNOUT_EN 0x0052
94#define MAX98927_R0053_BROWNOUT_INFINITE_HOLD 0x0053
95#define MAX98927_R0054_BROWNOUT_INFINITE_HOLD_CLR 0x0054
96#define MAX98927_R0055_BROWNOUT_LVL_HOLD 0x0055
97#define MAX98927_R005A_BROWNOUT_LVL1_THRESH 0x005A
98#define MAX98927_R005B_BROWNOUT_LVL2_THRESH 0x005B
99#define MAX98927_R005C_BROWNOUT_LVL3_THRESH 0x005C
100#define MAX98927_R005D_BROWNOUT_LVL4_THRESH 0x005D
101#define MAX98927_R005E_BROWNOUT_THRESH_HYSTERYSIS 0x005E
102#define MAX98927_R005F_BROWNOUT_AMP_LIMITER_ATK_REL 0x005F
103#define MAX98927_R0060_BROWNOUT_AMP_GAIN_ATK_REL 0x0060
104#define MAX98927_R0061_BROWNOUT_AMP1_CLIP_MODE 0x0061
105#define MAX98927_R0072_BROWNOUT_LVL1_CUR_LIMIT 0x0072
106#define MAX98927_R0073_BROWNOUT_LVL1_AMP1_CTRL1 0x0073
107#define MAX98927_R0074_BROWNOUT_LVL1_AMP1_CTRL2 0x0074
108#define MAX98927_R0075_BROWNOUT_LVL1_AMP1_CTRL3 0x0075
109#define MAX98927_R0076_BROWNOUT_LVL2_CUR_LIMIT 0x0076
110#define MAX98927_R0077_BROWNOUT_LVL2_AMP1_CTRL1 0x0077
111#define MAX98927_R0078_BROWNOUT_LVL2_AMP1_CTRL2 0x0078
112#define MAX98927_R0079_BROWNOUT_LVL2_AMP1_CTRL3 0x0079
113#define MAX98927_R007A_BROWNOUT_LVL3_CUR_LIMIT 0x007A
114#define MAX98927_R007B_BROWNOUT_LVL3_AMP1_CTRL1 0x007B
115#define MAX98927_R007C_BROWNOUT_LVL3_AMP1_CTRL2 0x007C
116#define MAX98927_R007D_BROWNOUT_LVL3_AMP1_CTRL3 0x007D
117#define MAX98927_R007E_BROWNOUT_LVL4_CUR_LIMIT 0x007E
118#define MAX98927_R007F_BROWNOUT_LVL4_AMP1_CTRL1 0x007F
119#define MAX98927_R0080_BROWNOUT_LVL4_AMP1_CTRL2 0x0080
120#define MAX98927_R0081_BROWNOUT_LVL4_AMP1_CTRL3 0x0081
121#define MAX98927_R0082_ENV_TRACK_VOUT_HEADROOM 0x0082
122#define MAX98927_R0083_ENV_TRACK_BOOST_VOUT_DELAY 0x0083
123#define MAX98927_R0084_ENV_TRACK_REL_RATE 0x0084
124#define MAX98927_R0085_ENV_TRACK_HOLD_RATE 0x0085
125#define MAX98927_R0086_ENV_TRACK_CTRL 0x0086
126#define MAX98927_R0087_ENV_TRACK_BOOST_VOUT_READ 0x0087
127#define MAX98927_R00FF_GLOBAL_SHDN 0x00FF
128#define MAX98927_R0100_SOFT_RESET 0x0100
129#define MAX98927_R01FF_REV_ID 0x01FF
130
131/* MAX98927_R0018_PCM_RX_EN_A */
132#define MAX98927_PCM_RX_CH0_EN (0x1 << 0)
133#define MAX98927_PCM_RX_CH1_EN (0x1 << 1)
134#define MAX98927_PCM_RX_CH2_EN (0x1 << 2)
135#define MAX98927_PCM_RX_CH3_EN (0x1 << 3)
136#define MAX98927_PCM_RX_CH4_EN (0x1 << 4)
137#define MAX98927_PCM_RX_CH5_EN (0x1 << 5)
138#define MAX98927_PCM_RX_CH6_EN (0x1 << 6)
139#define MAX98927_PCM_RX_CH7_EN (0x1 << 7)
140
141/* MAX98927_R001A_PCM_TX_EN_A */
142#define MAX98927_PCM_TX_CH0_EN (0x1 << 0)
143#define MAX98927_PCM_TX_CH1_EN (0x1 << 1)
144#define MAX98927_PCM_TX_CH2_EN (0x1 << 2)
145#define MAX98927_PCM_TX_CH3_EN (0x1 << 3)
146#define MAX98927_PCM_TX_CH4_EN (0x1 << 4)
147#define MAX98927_PCM_TX_CH5_EN (0x1 << 5)
148#define MAX98927_PCM_TX_CH6_EN (0x1 << 6)
149#define MAX98927_PCM_TX_CH7_EN (0x1 << 7)
150
151/* MAX98927_R001E_PCM_TX_CH_SRC_A */
152#define MAX98927_PCM_TX_CH_SRC_A_V_SHIFT (0)
153#define MAX98927_PCM_TX_CH_SRC_A_I_SHIFT (4)
154
155/* MAX98927_R001F_PCM_TX_CH_SRC_B */
156#define MAX98927_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 5)
157
158/* MAX98927_R0020_PCM_MODE_CFG */
159#define MAX98927_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 2)
160#define MAX98927_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
161#define MAX98927_PCM_MODE_CFG_FORMAT_SHIFT (3)
162#define MAX98927_PCM_FORMAT_I2S (0x0 << 0)
163#define MAX98927_PCM_FORMAT_LJ (0x1 << 0)
164
165#define MAX98927_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
166#define MAX98927_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
167#define MAX98927_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
168#define MAX98927_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
169
170/* MAX98927_R0021_PCM_MASTER_MODE */
171#define MAX98927_PCM_MASTER_MODE_MASK (0x3 << 0)
172#define MAX98927_PCM_MASTER_MODE_SLAVE (0x0 << 0)
173#define MAX98927_PCM_MASTER_MODE_MASTER (0x3 << 0)
174
175#define MAX98927_PCM_MASTER_MODE_MCLK_MASK (0xF << 2)
176#define MAX98927_PCM_MASTER_MODE_MCLK_RATE_SHIFT (2)
177
178/* MAX98927_R0022_PCM_CLK_SETUP */
179#define MAX98927_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
180
181/* MAX98927_R0023_PCM_SR_SETUP1 */
182#define MAX98927_PCM_SR_SET1_SR_MASK (0xF << 0)
183
184#define MAX98927_PCM_SR_SET1_SR_8000 (0x0 << 0)
185#define MAX98927_PCM_SR_SET1_SR_11025 (0x1 << 0)
186#define MAX98927_PCM_SR_SET1_SR_12000 (0x2 << 0)
187#define MAX98927_PCM_SR_SET1_SR_16000 (0x3 << 0)
188#define MAX98927_PCM_SR_SET1_SR_22050 (0x4 << 0)
189#define MAX98927_PCM_SR_SET1_SR_24000 (0x5 << 0)
190#define MAX98927_PCM_SR_SET1_SR_32000 (0x6 << 0)
191#define MAX98927_PCM_SR_SET1_SR_44100 (0x7 << 0)
192#define MAX98927_PCM_SR_SET1_SR_48000 (0x8 << 0)
193
194/* MAX98927_R0024_PCM_SR_SETUP2 */
195#define MAX98927_PCM_SR_SET2_SR_MASK (0xF << 4)
196#define MAX98927_PCM_SR_SET2_SR_SHIFT (4)
197#define MAX98927_PCM_SR_SET2_IVADC_SR_MASK (0xf << 0)
198
199/* MAX98927_R0025_PCM_TO_SPK_MONOMIX_A */
200#define MAX98927_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
201#define MAX98927_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
202
203/* MAX98927_R0035_PDM_RX_CTRL */
204#define MAX98927_PDM_RX_EN_MASK (0x1 << 0)
205
206/* MAX98927_R0036_AMP_VOL_CTRL */
207#define MAX98927_AMP_VOL_SEL (0x1 << 7)
208#define MAX98927_AMP_VOL_SEL_WIDTH (1)
209#define MAX98927_AMP_VOL_SEL_SHIFT (7)
210#define MAX98927_AMP_VOL_MASK (0x7f << 0)
211#define MAX98927_AMP_VOL_WIDTH (7)
212#define MAX98927_AMP_VOL_SHIFT (0)
213
214/* MAX98927_R0037_AMP_DSP_CFG */
215#define MAX98927_AMP_DSP_CFG_DCBLK_EN (0x1 << 0)
216#define MAX98927_AMP_DSP_CFG_DITH_EN (0x1 << 1)
217#define MAX98927_AMP_DSP_CFG_RMP_BYPASS (0x1 << 4)
218#define MAX98927_AMP_DSP_CFG_DAC_INV (0x1 << 5)
219#define MAX98927_AMP_DSP_CFG_RMP_SHIFT (4)
220
221/* MAX98927_R0039_DRE_CTRL */
222#define MAX98927_DRE_CTRL_DRE_EN (0x1 << 0)
223#define MAX98927_DRE_EN_SHIFT 0x1
224
225/* MAX98927_R003A_AMP_EN */
226#define MAX98927_AMP_EN_MASK (0x1 << 0)
227
228/* MAX98927_R003B_SPK_SRC_SEL */
229#define MAX98927_SPK_SRC_MASK (0x3 << 0)
230
231/* MAX98927_R003C_SPK_GAIN */
232#define MAX98927_SPK_PCM_GAIN_MASK (0x7 << 0)
233#define MAX98927_SPK_PDM_GAIN_MASK (0x7 << 4)
234#define MAX98927_SPK_GAIN_WIDTH (3)
235
236/* MAX98927_R003E_MEAS_EN */
237#define MAX98927_MEAS_V_EN (0x1 << 0)
238#define MAX98927_MEAS_I_EN (0x1 << 1)
239
240/* MAX98927_R0040_BOOST_CTRL0 */
241#define MAX98927_BOOST_CTRL0_VOUT_MASK (0x1f << 0)
242#define MAX98927_BOOST_CTRL0_PVDD_MASK (0x1 << 7)
243#define MAX98927_BOOST_CTRL0_PVDD_EN_SHIFT (7)
244
245/* MAX98927_R0052_BROWNOUT_EN */
246#define MAX98927_BROWNOUT_BDE_EN (0x1 << 0)
247#define MAX98927_BROWNOUT_AMP_EN (0x1 << 1)
248#define MAX98927_BROWNOUT_DSP_EN (0x1 << 2)
249#define MAX98927_BROWNOUT_DSP_SHIFT (2)
250
251/* MAX98927_R0100_SOFT_RESET */
252#define MAX98927_SOFT_RESET (0x1 << 0)
253
254/* MAX98927_R00FF_GLOBAL_SHDN */
255#define MAX98927_GLOBAL_EN_MASK (0x1 << 0)
256
257struct max98927_priv {
258 struct regmap *regmap;
259 struct snd_soc_codec *codec;
260 struct max98927_pdata *pdata;
261 unsigned int spk_gain;
262 unsigned int sysclk;
263 unsigned int v_l_slot;
264 unsigned int i_l_slot;
265 bool interleave_mode;
266 unsigned int ch_size;
267 unsigned int rate;
268 unsigned int iface;
269 unsigned int master;
270 unsigned int digital_gain;
271};
272#endif
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index 9e8f0f4aa51a..c8bcb1db966d 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -39,147 +39,147 @@
39 39
40/* scaling for mclk from sysclk_src output */ 40/* scaling for mclk from sysclk_src output */
41static const struct nau8540_fll_attr mclk_src_scaling[] = { 41static const struct nau8540_fll_attr mclk_src_scaling[] = {
42 { 1, 0x0 }, 42 { 1, 0x0 },
43 { 2, 0x2 }, 43 { 2, 0x2 },
44 { 4, 0x3 }, 44 { 4, 0x3 },
45 { 8, 0x4 }, 45 { 8, 0x4 },
46 { 16, 0x5 }, 46 { 16, 0x5 },
47 { 32, 0x6 }, 47 { 32, 0x6 },
48 { 3, 0x7 }, 48 { 3, 0x7 },
49 { 6, 0xa }, 49 { 6, 0xa },
50 { 12, 0xb }, 50 { 12, 0xb },
51 { 24, 0xc }, 51 { 24, 0xc },
52}; 52};
53 53
54/* ratio for input clk freq */ 54/* ratio for input clk freq */
55static const struct nau8540_fll_attr fll_ratio[] = { 55static const struct nau8540_fll_attr fll_ratio[] = {
56 { 512000, 0x01 }, 56 { 512000, 0x01 },
57 { 256000, 0x02 }, 57 { 256000, 0x02 },
58 { 128000, 0x04 }, 58 { 128000, 0x04 },
59 { 64000, 0x08 }, 59 { 64000, 0x08 },
60 { 32000, 0x10 }, 60 { 32000, 0x10 },
61 { 8000, 0x20 }, 61 { 8000, 0x20 },
62 { 4000, 0x40 }, 62 { 4000, 0x40 },
63}; 63};
64 64
65static const struct nau8540_fll_attr fll_pre_scalar[] = { 65static const struct nau8540_fll_attr fll_pre_scalar[] = {
66 { 1, 0x0 }, 66 { 1, 0x0 },
67 { 2, 0x1 }, 67 { 2, 0x1 },
68 { 4, 0x2 }, 68 { 4, 0x2 },
69 { 8, 0x3 }, 69 { 8, 0x3 },
70}; 70};
71 71
72/* over sampling rate */ 72/* over sampling rate */
73static const struct nau8540_osr_attr osr_adc_sel[] = { 73static const struct nau8540_osr_attr osr_adc_sel[] = {
74 { 32, 3 }, /* OSR 32, SRC 1/8 */ 74 { 32, 3 }, /* OSR 32, SRC 1/8 */
75 { 64, 2 }, /* OSR 64, SRC 1/4 */ 75 { 64, 2 }, /* OSR 64, SRC 1/4 */
76 { 128, 1 }, /* OSR 128, SRC 1/2 */ 76 { 128, 1 }, /* OSR 128, SRC 1/2 */
77 { 256, 0 }, /* OSR 256, SRC 1 */ 77 { 256, 0 }, /* OSR 256, SRC 1 */
78}; 78};
79 79
80static const struct reg_default nau8540_reg_defaults[] = { 80static const struct reg_default nau8540_reg_defaults[] = {
81 {NAU8540_REG_POWER_MANAGEMENT, 0x0000}, 81 {NAU8540_REG_POWER_MANAGEMENT, 0x0000},
82 {NAU8540_REG_CLOCK_CTRL, 0x0000}, 82 {NAU8540_REG_CLOCK_CTRL, 0x0000},
83 {NAU8540_REG_CLOCK_SRC, 0x0000}, 83 {NAU8540_REG_CLOCK_SRC, 0x0000},
84 {NAU8540_REG_FLL1, 0x0001}, 84 {NAU8540_REG_FLL1, 0x0001},
85 {NAU8540_REG_FLL2, 0x3126}, 85 {NAU8540_REG_FLL2, 0x3126},
86 {NAU8540_REG_FLL3, 0x0008}, 86 {NAU8540_REG_FLL3, 0x0008},
87 {NAU8540_REG_FLL4, 0x0010}, 87 {NAU8540_REG_FLL4, 0x0010},
88 {NAU8540_REG_FLL5, 0xC000}, 88 {NAU8540_REG_FLL5, 0xC000},
89 {NAU8540_REG_FLL6, 0x6000}, 89 {NAU8540_REG_FLL6, 0x6000},
90 {NAU8540_REG_FLL_VCO_RSV, 0xF13C}, 90 {NAU8540_REG_FLL_VCO_RSV, 0xF13C},
91 {NAU8540_REG_PCM_CTRL0, 0x000B}, 91 {NAU8540_REG_PCM_CTRL0, 0x000B},
92 {NAU8540_REG_PCM_CTRL1, 0x3010}, 92 {NAU8540_REG_PCM_CTRL1, 0x3010},
93 {NAU8540_REG_PCM_CTRL2, 0x0800}, 93 {NAU8540_REG_PCM_CTRL2, 0x0800},
94 {NAU8540_REG_PCM_CTRL3, 0x0000}, 94 {NAU8540_REG_PCM_CTRL3, 0x0000},
95 {NAU8540_REG_PCM_CTRL4, 0x000F}, 95 {NAU8540_REG_PCM_CTRL4, 0x000F},
96 {NAU8540_REG_ALC_CONTROL_1, 0x0000}, 96 {NAU8540_REG_ALC_CONTROL_1, 0x0000},
97 {NAU8540_REG_ALC_CONTROL_2, 0x700B}, 97 {NAU8540_REG_ALC_CONTROL_2, 0x700B},
98 {NAU8540_REG_ALC_CONTROL_3, 0x0022}, 98 {NAU8540_REG_ALC_CONTROL_3, 0x0022},
99 {NAU8540_REG_ALC_CONTROL_4, 0x1010}, 99 {NAU8540_REG_ALC_CONTROL_4, 0x1010},
100 {NAU8540_REG_ALC_CONTROL_5, 0x1010}, 100 {NAU8540_REG_ALC_CONTROL_5, 0x1010},
101 {NAU8540_REG_NOTCH_FIL1_CH1, 0x0000}, 101 {NAU8540_REG_NOTCH_FIL1_CH1, 0x0000},
102 {NAU8540_REG_NOTCH_FIL2_CH1, 0x0000}, 102 {NAU8540_REG_NOTCH_FIL2_CH1, 0x0000},
103 {NAU8540_REG_NOTCH_FIL1_CH2, 0x0000}, 103 {NAU8540_REG_NOTCH_FIL1_CH2, 0x0000},
104 {NAU8540_REG_NOTCH_FIL2_CH2, 0x0000}, 104 {NAU8540_REG_NOTCH_FIL2_CH2, 0x0000},
105 {NAU8540_REG_NOTCH_FIL1_CH3, 0x0000}, 105 {NAU8540_REG_NOTCH_FIL1_CH3, 0x0000},
106 {NAU8540_REG_NOTCH_FIL2_CH3, 0x0000}, 106 {NAU8540_REG_NOTCH_FIL2_CH3, 0x0000},
107 {NAU8540_REG_NOTCH_FIL1_CH4, 0x0000}, 107 {NAU8540_REG_NOTCH_FIL1_CH4, 0x0000},
108 {NAU8540_REG_NOTCH_FIL2_CH4, 0x0000}, 108 {NAU8540_REG_NOTCH_FIL2_CH4, 0x0000},
109 {NAU8540_REG_HPF_FILTER_CH12, 0x0000}, 109 {NAU8540_REG_HPF_FILTER_CH12, 0x0000},
110 {NAU8540_REG_HPF_FILTER_CH34, 0x0000}, 110 {NAU8540_REG_HPF_FILTER_CH34, 0x0000},
111 {NAU8540_REG_ADC_SAMPLE_RATE, 0x0002}, 111 {NAU8540_REG_ADC_SAMPLE_RATE, 0x0002},
112 {NAU8540_REG_DIGITAL_GAIN_CH1, 0x0400}, 112 {NAU8540_REG_DIGITAL_GAIN_CH1, 0x0400},
113 {NAU8540_REG_DIGITAL_GAIN_CH2, 0x0400}, 113 {NAU8540_REG_DIGITAL_GAIN_CH2, 0x0400},
114 {NAU8540_REG_DIGITAL_GAIN_CH3, 0x0400}, 114 {NAU8540_REG_DIGITAL_GAIN_CH3, 0x0400},
115 {NAU8540_REG_DIGITAL_GAIN_CH4, 0x0400}, 115 {NAU8540_REG_DIGITAL_GAIN_CH4, 0x0400},
116 {NAU8540_REG_DIGITAL_MUX, 0x00E4}, 116 {NAU8540_REG_DIGITAL_MUX, 0x00E4},
117 {NAU8540_REG_GPIO_CTRL, 0x0000}, 117 {NAU8540_REG_GPIO_CTRL, 0x0000},
118 {NAU8540_REG_MISC_CTRL, 0x0000}, 118 {NAU8540_REG_MISC_CTRL, 0x0000},
119 {NAU8540_REG_I2C_CTRL, 0xEFFF}, 119 {NAU8540_REG_I2C_CTRL, 0xEFFF},
120 {NAU8540_REG_VMID_CTRL, 0x0000}, 120 {NAU8540_REG_VMID_CTRL, 0x0000},
121 {NAU8540_REG_MUTE, 0x0000}, 121 {NAU8540_REG_MUTE, 0x0000},
122 {NAU8540_REG_ANALOG_ADC1, 0x0011}, 122 {NAU8540_REG_ANALOG_ADC1, 0x0011},
123 {NAU8540_REG_ANALOG_ADC2, 0x0020}, 123 {NAU8540_REG_ANALOG_ADC2, 0x0020},
124 {NAU8540_REG_ANALOG_PWR, 0x0000}, 124 {NAU8540_REG_ANALOG_PWR, 0x0000},
125 {NAU8540_REG_MIC_BIAS, 0x0004}, 125 {NAU8540_REG_MIC_BIAS, 0x0004},
126 {NAU8540_REG_REFERENCE, 0x0000}, 126 {NAU8540_REG_REFERENCE, 0x0000},
127 {NAU8540_REG_FEPGA1, 0x0000}, 127 {NAU8540_REG_FEPGA1, 0x0000},
128 {NAU8540_REG_FEPGA2, 0x0000}, 128 {NAU8540_REG_FEPGA2, 0x0000},
129 {NAU8540_REG_FEPGA3, 0x0101}, 129 {NAU8540_REG_FEPGA3, 0x0101},
130 {NAU8540_REG_FEPGA4, 0x0101}, 130 {NAU8540_REG_FEPGA4, 0x0101},
131 {NAU8540_REG_PWR, 0x0000}, 131 {NAU8540_REG_PWR, 0x0000},
132}; 132};
133 133
134static bool nau8540_readable_reg(struct device *dev, unsigned int reg) 134static bool nau8540_readable_reg(struct device *dev, unsigned int reg)
135{ 135{
136 switch (reg) { 136 switch (reg) {
137 case NAU8540_REG_POWER_MANAGEMENT ... NAU8540_REG_FLL_VCO_RSV: 137 case NAU8540_REG_POWER_MANAGEMENT ... NAU8540_REG_FLL_VCO_RSV:
138 case NAU8540_REG_PCM_CTRL0 ... NAU8540_REG_PCM_CTRL4: 138 case NAU8540_REG_PCM_CTRL0 ... NAU8540_REG_PCM_CTRL4:
139 case NAU8540_REG_ALC_CONTROL_1 ... NAU8540_REG_ALC_CONTROL_5: 139 case NAU8540_REG_ALC_CONTROL_1 ... NAU8540_REG_ALC_CONTROL_5:
140 case NAU8540_REG_ALC_GAIN_CH12 ... NAU8540_REG_ADC_SAMPLE_RATE: 140 case NAU8540_REG_ALC_GAIN_CH12 ... NAU8540_REG_ADC_SAMPLE_RATE:
141 case NAU8540_REG_DIGITAL_GAIN_CH1 ... NAU8540_REG_DIGITAL_MUX: 141 case NAU8540_REG_DIGITAL_GAIN_CH1 ... NAU8540_REG_DIGITAL_MUX:
142 case NAU8540_REG_P2P_CH1 ... NAU8540_REG_I2C_CTRL: 142 case NAU8540_REG_P2P_CH1 ... NAU8540_REG_I2C_CTRL:
143 case NAU8540_REG_I2C_DEVICE_ID: 143 case NAU8540_REG_I2C_DEVICE_ID:
144 case NAU8540_REG_VMID_CTRL ... NAU8540_REG_MUTE: 144 case NAU8540_REG_VMID_CTRL ... NAU8540_REG_MUTE:
145 case NAU8540_REG_ANALOG_ADC1 ... NAU8540_REG_PWR: 145 case NAU8540_REG_ANALOG_ADC1 ... NAU8540_REG_PWR:
146 return true; 146 return true;
147 default: 147 default:
148 return false; 148 return false;
149 } 149 }
150 150
151} 151}
152 152
153static bool nau8540_writeable_reg(struct device *dev, unsigned int reg) 153static bool nau8540_writeable_reg(struct device *dev, unsigned int reg)
154{ 154{
155 switch (reg) { 155 switch (reg) {
156 case NAU8540_REG_SW_RESET ... NAU8540_REG_FLL_VCO_RSV: 156 case NAU8540_REG_SW_RESET ... NAU8540_REG_FLL_VCO_RSV:
157 case NAU8540_REG_PCM_CTRL0 ... NAU8540_REG_PCM_CTRL4: 157 case NAU8540_REG_PCM_CTRL0 ... NAU8540_REG_PCM_CTRL4:
158 case NAU8540_REG_ALC_CONTROL_1 ... NAU8540_REG_ALC_CONTROL_5: 158 case NAU8540_REG_ALC_CONTROL_1 ... NAU8540_REG_ALC_CONTROL_5:
159 case NAU8540_REG_NOTCH_FIL1_CH1 ... NAU8540_REG_ADC_SAMPLE_RATE: 159 case NAU8540_REG_NOTCH_FIL1_CH1 ... NAU8540_REG_ADC_SAMPLE_RATE:
160 case NAU8540_REG_DIGITAL_GAIN_CH1 ... NAU8540_REG_DIGITAL_MUX: 160 case NAU8540_REG_DIGITAL_GAIN_CH1 ... NAU8540_REG_DIGITAL_MUX:
161 case NAU8540_REG_GPIO_CTRL ... NAU8540_REG_I2C_CTRL: 161 case NAU8540_REG_GPIO_CTRL ... NAU8540_REG_I2C_CTRL:
162 case NAU8540_REG_RST: 162 case NAU8540_REG_RST:
163 case NAU8540_REG_VMID_CTRL ... NAU8540_REG_MUTE: 163 case NAU8540_REG_VMID_CTRL ... NAU8540_REG_MUTE:
164 case NAU8540_REG_ANALOG_ADC1 ... NAU8540_REG_PWR: 164 case NAU8540_REG_ANALOG_ADC1 ... NAU8540_REG_PWR:
165 return true; 165 return true;
166 default: 166 default:
167 return false; 167 return false;
168 } 168 }
169} 169}
170 170
171static bool nau8540_volatile_reg(struct device *dev, unsigned int reg) 171static bool nau8540_volatile_reg(struct device *dev, unsigned int reg)
172{ 172{
173 switch (reg) { 173 switch (reg) {
174 case NAU8540_REG_SW_RESET: 174 case NAU8540_REG_SW_RESET:
175 case NAU8540_REG_ALC_GAIN_CH12 ... NAU8540_REG_ALC_STATUS: 175 case NAU8540_REG_ALC_GAIN_CH12 ... NAU8540_REG_ALC_STATUS:
176 case NAU8540_REG_P2P_CH1 ... NAU8540_REG_PEAK_CH4: 176 case NAU8540_REG_P2P_CH1 ... NAU8540_REG_PEAK_CH4:
177 case NAU8540_REG_I2C_DEVICE_ID: 177 case NAU8540_REG_I2C_DEVICE_ID:
178 case NAU8540_REG_RST: 178 case NAU8540_REG_RST:
179 return true; 179 return true;
180 default: 180 default:
181 return false; 181 return false;
182 } 182 }
183} 183}
184 184
185 185
@@ -187,255 +187,255 @@ static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -12800, 3600);
187static const DECLARE_TLV_DB_MINMAX(fepga_gain_tlv, -100, 3600); 187static const DECLARE_TLV_DB_MINMAX(fepga_gain_tlv, -100, 3600);
188 188
189static const struct snd_kcontrol_new nau8540_snd_controls[] = { 189static const struct snd_kcontrol_new nau8540_snd_controls[] = {
190 SOC_SINGLE_TLV("Mic1 Volume", NAU8540_REG_DIGITAL_GAIN_CH1, 190 SOC_SINGLE_TLV("Mic1 Volume", NAU8540_REG_DIGITAL_GAIN_CH1,
191 0, 0x520, 0, adc_vol_tlv), 191 0, 0x520, 0, adc_vol_tlv),
192 SOC_SINGLE_TLV("Mic2 Volume", NAU8540_REG_DIGITAL_GAIN_CH2, 192 SOC_SINGLE_TLV("Mic2 Volume", NAU8540_REG_DIGITAL_GAIN_CH2,
193 0, 0x520, 0, adc_vol_tlv), 193 0, 0x520, 0, adc_vol_tlv),
194 SOC_SINGLE_TLV("Mic3 Volume", NAU8540_REG_DIGITAL_GAIN_CH3, 194 SOC_SINGLE_TLV("Mic3 Volume", NAU8540_REG_DIGITAL_GAIN_CH3,
195 0, 0x520, 0, adc_vol_tlv), 195 0, 0x520, 0, adc_vol_tlv),
196 SOC_SINGLE_TLV("Mic4 Volume", NAU8540_REG_DIGITAL_GAIN_CH4, 196 SOC_SINGLE_TLV("Mic4 Volume", NAU8540_REG_DIGITAL_GAIN_CH4,
197 0, 0x520, 0, adc_vol_tlv), 197 0, 0x520, 0, adc_vol_tlv),
198 198
199 SOC_SINGLE_TLV("Frontend PGA1 Volume", NAU8540_REG_FEPGA3, 199 SOC_SINGLE_TLV("Frontend PGA1 Volume", NAU8540_REG_FEPGA3,
200 0, 0x25, 0, fepga_gain_tlv), 200 0, 0x25, 0, fepga_gain_tlv),
201 SOC_SINGLE_TLV("Frontend PGA2 Volume", NAU8540_REG_FEPGA3, 201 SOC_SINGLE_TLV("Frontend PGA2 Volume", NAU8540_REG_FEPGA3,
202 8, 0x25, 0, fepga_gain_tlv), 202 8, 0x25, 0, fepga_gain_tlv),
203 SOC_SINGLE_TLV("Frontend PGA3 Volume", NAU8540_REG_FEPGA4, 203 SOC_SINGLE_TLV("Frontend PGA3 Volume", NAU8540_REG_FEPGA4,
204 0, 0x25, 0, fepga_gain_tlv), 204 0, 0x25, 0, fepga_gain_tlv),
205 SOC_SINGLE_TLV("Frontend PGA4 Volume", NAU8540_REG_FEPGA4, 205 SOC_SINGLE_TLV("Frontend PGA4 Volume", NAU8540_REG_FEPGA4,
206 8, 0x25, 0, fepga_gain_tlv), 206 8, 0x25, 0, fepga_gain_tlv),
207}; 207};
208 208
209static const char * const adc_channel[] = { 209static const char * const adc_channel[] = {
210 "ADC channel 1", "ADC channel 2", "ADC channel 3", "ADC channel 4" 210 "ADC channel 1", "ADC channel 2", "ADC channel 3", "ADC channel 4"
211}; 211};
212static SOC_ENUM_SINGLE_DECL( 212static SOC_ENUM_SINGLE_DECL(
213 digital_ch4_enum, NAU8540_REG_DIGITAL_MUX, 6, adc_channel); 213 digital_ch4_enum, NAU8540_REG_DIGITAL_MUX, 6, adc_channel);
214 214
215static const struct snd_kcontrol_new digital_ch4_mux = 215static const struct snd_kcontrol_new digital_ch4_mux =
216 SOC_DAPM_ENUM("Digital CH4 Select", digital_ch4_enum); 216 SOC_DAPM_ENUM("Digital CH4 Select", digital_ch4_enum);
217 217
218static SOC_ENUM_SINGLE_DECL( 218static SOC_ENUM_SINGLE_DECL(
219 digital_ch3_enum, NAU8540_REG_DIGITAL_MUX, 4, adc_channel); 219 digital_ch3_enum, NAU8540_REG_DIGITAL_MUX, 4, adc_channel);
220 220
221static const struct snd_kcontrol_new digital_ch3_mux = 221static const struct snd_kcontrol_new digital_ch3_mux =
222 SOC_DAPM_ENUM("Digital CH3 Select", digital_ch3_enum); 222 SOC_DAPM_ENUM("Digital CH3 Select", digital_ch3_enum);
223 223
224static SOC_ENUM_SINGLE_DECL( 224static SOC_ENUM_SINGLE_DECL(
225 digital_ch2_enum, NAU8540_REG_DIGITAL_MUX, 2, adc_channel); 225 digital_ch2_enum, NAU8540_REG_DIGITAL_MUX, 2, adc_channel);
226 226
227static const struct snd_kcontrol_new digital_ch2_mux = 227static const struct snd_kcontrol_new digital_ch2_mux =
228 SOC_DAPM_ENUM("Digital CH2 Select", digital_ch2_enum); 228 SOC_DAPM_ENUM("Digital CH2 Select", digital_ch2_enum);
229 229
230static SOC_ENUM_SINGLE_DECL( 230static SOC_ENUM_SINGLE_DECL(
231 digital_ch1_enum, NAU8540_REG_DIGITAL_MUX, 0, adc_channel); 231 digital_ch1_enum, NAU8540_REG_DIGITAL_MUX, 0, adc_channel);
232 232
233static const struct snd_kcontrol_new digital_ch1_mux = 233static const struct snd_kcontrol_new digital_ch1_mux =
234 SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum); 234 SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum);
235 235
236static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = { 236static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
237 SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0), 237 SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0),
238 SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0), 238 SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0),
239 239
240 SND_SOC_DAPM_INPUT("MIC1"), 240 SND_SOC_DAPM_INPUT("MIC1"),
241 SND_SOC_DAPM_INPUT("MIC2"), 241 SND_SOC_DAPM_INPUT("MIC2"),
242 SND_SOC_DAPM_INPUT("MIC3"), 242 SND_SOC_DAPM_INPUT("MIC3"),
243 SND_SOC_DAPM_INPUT("MIC4"), 243 SND_SOC_DAPM_INPUT("MIC4"),
244 244
245 SND_SOC_DAPM_PGA("Frontend PGA1", NAU8540_REG_PWR, 12, 0, NULL, 0), 245 SND_SOC_DAPM_PGA("Frontend PGA1", NAU8540_REG_PWR, 12, 0, NULL, 0),
246 SND_SOC_DAPM_PGA("Frontend PGA2", NAU8540_REG_PWR, 13, 0, NULL, 0), 246 SND_SOC_DAPM_PGA("Frontend PGA2", NAU8540_REG_PWR, 13, 0, NULL, 0),
247 SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0), 247 SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0),
248 SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0), 248 SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0),
249 249
250 SND_SOC_DAPM_ADC("ADC1", NULL, 250 SND_SOC_DAPM_ADC("ADC1", NULL,
251 NAU8540_REG_POWER_MANAGEMENT, 0, 0), 251 NAU8540_REG_POWER_MANAGEMENT, 0, 0),
252 SND_SOC_DAPM_ADC("ADC2", NULL, 252 SND_SOC_DAPM_ADC("ADC2", NULL,
253 NAU8540_REG_POWER_MANAGEMENT, 1, 0), 253 NAU8540_REG_POWER_MANAGEMENT, 1, 0),
254 SND_SOC_DAPM_ADC("ADC3", NULL, 254 SND_SOC_DAPM_ADC("ADC3", NULL,
255 NAU8540_REG_POWER_MANAGEMENT, 2, 0), 255 NAU8540_REG_POWER_MANAGEMENT, 2, 0),
256 SND_SOC_DAPM_ADC("ADC4", NULL, 256 SND_SOC_DAPM_ADC("ADC4", NULL,
257 NAU8540_REG_POWER_MANAGEMENT, 3, 0), 257 NAU8540_REG_POWER_MANAGEMENT, 3, 0),
258 258
259 SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0), 259 SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0),
260 SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0), 260 SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0),
261 SND_SOC_DAPM_PGA("ADC CH3", NAU8540_REG_ANALOG_PWR, 2, 0, NULL, 0), 261 SND_SOC_DAPM_PGA("ADC CH3", NAU8540_REG_ANALOG_PWR, 2, 0, NULL, 0),
262 SND_SOC_DAPM_PGA("ADC CH4", NAU8540_REG_ANALOG_PWR, 3, 0, NULL, 0), 262 SND_SOC_DAPM_PGA("ADC CH4", NAU8540_REG_ANALOG_PWR, 3, 0, NULL, 0),
263 263
264 SND_SOC_DAPM_MUX("Digital CH4 Mux", 264 SND_SOC_DAPM_MUX("Digital CH4 Mux",
265 SND_SOC_NOPM, 0, 0, &digital_ch4_mux), 265 SND_SOC_NOPM, 0, 0, &digital_ch4_mux),
266 SND_SOC_DAPM_MUX("Digital CH3 Mux", 266 SND_SOC_DAPM_MUX("Digital CH3 Mux",
267 SND_SOC_NOPM, 0, 0, &digital_ch3_mux), 267 SND_SOC_NOPM, 0, 0, &digital_ch3_mux),
268 SND_SOC_DAPM_MUX("Digital CH2 Mux", 268 SND_SOC_DAPM_MUX("Digital CH2 Mux",
269 SND_SOC_NOPM, 0, 0, &digital_ch2_mux), 269 SND_SOC_NOPM, 0, 0, &digital_ch2_mux),
270 SND_SOC_DAPM_MUX("Digital CH1 Mux", 270 SND_SOC_DAPM_MUX("Digital CH1 Mux",
271 SND_SOC_NOPM, 0, 0, &digital_ch1_mux), 271 SND_SOC_NOPM, 0, 0, &digital_ch1_mux),
272 272
273 SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0), 273 SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
274}; 274};
275 275
276static const struct snd_soc_dapm_route nau8540_dapm_routes[] = { 276static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
277 {"Frontend PGA1", NULL, "MIC1"}, 277 {"Frontend PGA1", NULL, "MIC1"},
278 {"Frontend PGA2", NULL, "MIC2"}, 278 {"Frontend PGA2", NULL, "MIC2"},
279 {"Frontend PGA3", NULL, "MIC3"}, 279 {"Frontend PGA3", NULL, "MIC3"},
280 {"Frontend PGA4", NULL, "MIC4"}, 280 {"Frontend PGA4", NULL, "MIC4"},
281 281
282 {"ADC1", NULL, "Frontend PGA1"}, 282 {"ADC1", NULL, "Frontend PGA1"},
283 {"ADC2", NULL, "Frontend PGA2"}, 283 {"ADC2", NULL, "Frontend PGA2"},
284 {"ADC3", NULL, "Frontend PGA3"}, 284 {"ADC3", NULL, "Frontend PGA3"},
285 {"ADC4", NULL, "Frontend PGA4"}, 285 {"ADC4", NULL, "Frontend PGA4"},
286 286
287 {"ADC CH1", NULL, "ADC1"}, 287 {"ADC CH1", NULL, "ADC1"},
288 {"ADC CH2", NULL, "ADC2"}, 288 {"ADC CH2", NULL, "ADC2"},
289 {"ADC CH3", NULL, "ADC3"}, 289 {"ADC CH3", NULL, "ADC3"},
290 {"ADC CH4", NULL, "ADC4"}, 290 {"ADC CH4", NULL, "ADC4"},
291 291
292 {"ADC1", NULL, "MICBIAS1"}, 292 {"ADC1", NULL, "MICBIAS1"},
293 {"ADC2", NULL, "MICBIAS1"}, 293 {"ADC2", NULL, "MICBIAS1"},
294 {"ADC3", NULL, "MICBIAS2"}, 294 {"ADC3", NULL, "MICBIAS2"},
295 {"ADC4", NULL, "MICBIAS2"}, 295 {"ADC4", NULL, "MICBIAS2"},
296 296
297 {"Digital CH1 Mux", "ADC channel 1", "ADC CH1"}, 297 {"Digital CH1 Mux", "ADC channel 1", "ADC CH1"},
298 {"Digital CH1 Mux", "ADC channel 2", "ADC CH2"}, 298 {"Digital CH1 Mux", "ADC channel 2", "ADC CH2"},
299 {"Digital CH1 Mux", "ADC channel 3", "ADC CH3"}, 299 {"Digital CH1 Mux", "ADC channel 3", "ADC CH3"},
300 {"Digital CH1 Mux", "ADC channel 4", "ADC CH4"}, 300 {"Digital CH1 Mux", "ADC channel 4", "ADC CH4"},
301 301
302 {"Digital CH2 Mux", "ADC channel 1", "ADC CH1"}, 302 {"Digital CH2 Mux", "ADC channel 1", "ADC CH1"},
303 {"Digital CH2 Mux", "ADC channel 2", "ADC CH2"}, 303 {"Digital CH2 Mux", "ADC channel 2", "ADC CH2"},
304 {"Digital CH2 Mux", "ADC channel 3", "ADC CH3"}, 304 {"Digital CH2 Mux", "ADC channel 3", "ADC CH3"},
305 {"Digital CH2 Mux", "ADC channel 4", "ADC CH4"}, 305 {"Digital CH2 Mux", "ADC channel 4", "ADC CH4"},
306 306
307 {"Digital CH3 Mux", "ADC channel 1", "ADC CH1"}, 307 {"Digital CH3 Mux", "ADC channel 1", "ADC CH1"},
308 {"Digital CH3 Mux", "ADC channel 2", "ADC CH2"}, 308 {"Digital CH3 Mux", "ADC channel 2", "ADC CH2"},
309 {"Digital CH3 Mux", "ADC channel 3", "ADC CH3"}, 309 {"Digital CH3 Mux", "ADC channel 3", "ADC CH3"},
310 {"Digital CH3 Mux", "ADC channel 4", "ADC CH4"}, 310 {"Digital CH3 Mux", "ADC channel 4", "ADC CH4"},
311 311
312 {"Digital CH4 Mux", "ADC channel 1", "ADC CH1"}, 312 {"Digital CH4 Mux", "ADC channel 1", "ADC CH1"},
313 {"Digital CH4 Mux", "ADC channel 2", "ADC CH2"}, 313 {"Digital CH4 Mux", "ADC channel 2", "ADC CH2"},
314 {"Digital CH4 Mux", "ADC channel 3", "ADC CH3"}, 314 {"Digital CH4 Mux", "ADC channel 3", "ADC CH3"},
315 {"Digital CH4 Mux", "ADC channel 4", "ADC CH4"}, 315 {"Digital CH4 Mux", "ADC channel 4", "ADC CH4"},
316 316
317 {"AIFTX", NULL, "Digital CH1 Mux"}, 317 {"AIFTX", NULL, "Digital CH1 Mux"},
318 {"AIFTX", NULL, "Digital CH2 Mux"}, 318 {"AIFTX", NULL, "Digital CH2 Mux"},
319 {"AIFTX", NULL, "Digital CH3 Mux"}, 319 {"AIFTX", NULL, "Digital CH3 Mux"},
320 {"AIFTX", NULL, "Digital CH4 Mux"}, 320 {"AIFTX", NULL, "Digital CH4 Mux"},
321}; 321};
322 322
323static int nau8540_clock_check(struct nau8540 *nau8540, int rate, int osr) 323static int nau8540_clock_check(struct nau8540 *nau8540, int rate, int osr)
324{ 324{
325 int osrate; 325 int osrate;
326 326
327 if (osr >= ARRAY_SIZE(osr_adc_sel)) 327 if (osr >= ARRAY_SIZE(osr_adc_sel))
328 return -EINVAL; 328 return -EINVAL;
329 osrate = osr_adc_sel[osr].osr; 329 osrate = osr_adc_sel[osr].osr;
330 330
331 if (rate * osr > CLK_ADC_MAX) { 331 if (rate * osr > CLK_ADC_MAX) {
332 dev_err(nau8540->dev, "exceed the maximum frequency of CLK_ADC\n"); 332 dev_err(nau8540->dev, "exceed the maximum frequency of CLK_ADC\n");
333 return -EINVAL; 333 return -EINVAL;
334 } 334 }
335 335
336 return 0; 336 return 0;
337} 337}
338 338
/* DAI hw_params callback: program the ADC clock divider and the I2S
 * word length for the requested stream parameters.
 *
 * Returns 0 on success, or -EINVAL if the rate/OSR combination or the
 * sample width is unsupported.
 */
static int nau8540_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
	unsigned int val_len = 0, osr;

	/* CLK_ADC = OSR * FS
	 * ADC clock frequency is defined as Over Sampling Rate (OSR)
	 * multiplied by the audio sample rate (Fs). Note that the OSR and Fs
	 * values must be selected such that the maximum frequency is less
	 * than 6.144 MHz.
	 */
	/* Read back the OSR field currently programmed in the hardware
	 * (set up in nau8540_init_regs()) and validate it against the
	 * requested sample rate before touching the clock source.
	 */
	regmap_read(nau8540->regmap, NAU8540_REG_ADC_SAMPLE_RATE, &osr);
	osr &= NAU8540_ADC_OSR_MASK;
	if (nau8540_clock_check(nau8540, params_rate(params), osr))
		return -EINVAL;
	/* Select the ADC clock divider that matches this OSR setting. */
	regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
		NAU8540_CLK_ADC_SRC_MASK,
		osr_adc_sel[osr].clk_src << NAU8540_CLK_ADC_SRC_SFT);

	/* Map the PCM sample width onto the I2S data-length field. */
	switch (params_width(params)) {
	case 16:
		val_len |= NAU8540_I2S_DL_16;
		break;
	case 20:
		val_len |= NAU8540_I2S_DL_20;
		break;
	case 24:
		val_len |= NAU8540_I2S_DL_24;
		break;
	case 32:
		val_len |= NAU8540_I2S_DL_32;
		break;
	default:
		return -EINVAL;
	}

	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL0,
		NAU8540_I2S_DL_MASK, val_len);

	return 0;
}
382 382
/* DAI set_fmt callback: configure master/slave role, bit-clock polarity
 * and the serial data format (I2S, left/right justified, DSP A/B).
 *
 * Returns 0 on success or -EINVAL for an unsupported format flag.
 */
static int nau8540_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_codec *codec = dai->codec;
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
	unsigned int ctrl1_val = 0, ctrl2_val = 0;

	/* Codec as clock master or slave on the DAI link. */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		ctrl2_val |= NAU8540_I2S_MS_MASTER;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		return -EINVAL;
	}

	/* Only normal and inverted bit-clock (frame normal) are supported. */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_NF:
		ctrl1_val |= NAU8540_I2S_BP_INV;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		ctrl1_val |= NAU8540_I2S_DF_I2S;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		ctrl1_val |= NAU8540_I2S_DF_LEFT;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		ctrl1_val |= NAU8540_I2S_DF_RIGTH;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		ctrl1_val |= NAU8540_I2S_DF_PCM_AB;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* DSP B shares the PCM A/B data format but additionally
		 * sets the PCM-B enable bit.
		 */
		ctrl1_val |= NAU8540_I2S_DF_PCM_AB;
		ctrl1_val |= NAU8540_I2S_PCMB_EN;
		break;
	default:
		return -EINVAL;
	}

	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL0,
		NAU8540_I2S_DL_MASK | NAU8540_I2S_DF_MASK |
		NAU8540_I2S_BP_INV | NAU8540_I2S_PCMB_EN, ctrl1_val);
	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
		NAU8540_I2S_MS_MASK | NAU8540_I2S_DO12_OE, ctrl2_val);
	/* Non-TDM operation: keep the DO34 output disabled here; it is
	 * enabled by nau8540_set_tdm_slot() when TDM is in use.
	 */
	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
		NAU8540_I2S_DO34_OE, 0);

	return 0;
}
440 440
441/** 441/**
@@ -451,55 +451,55 @@ static int nau8540_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
451 * Configures a DAI for TDM operation. Only support 4 slots TDM. 451 * Configures a DAI for TDM operation. Only support 4 slots TDM.
452 */ 452 */
/* DAI set_tdm_slot callback. Only 4-slot TDM is supported, and tx_mask
 * must use either the low nibble or the high nibble, never both.
 */
static int nau8540_set_tdm_slot(struct snd_soc_dai *dai,
	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
	struct snd_soc_codec *codec = dai->codec;
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
	unsigned int ctrl2_val = 0, ctrl4_val = 0;

	/* Reject more than 4 slots or a mask spanning both nibbles. */
	if (slots > 4 || ((tx_mask & 0xf0) && (tx_mask & 0xf)))
		return -EINVAL;

	ctrl4_val |= (NAU8540_TDM_MODE | NAU8540_TDM_OFFSET_EN);
	if (tx_mask & 0xf0) {
		/* High-nibble mask: shift the transmit window by four
		 * slot widths (written to the TSLOT_L field below).
		 * NOTE(review): presumably this places the four channels
		 * in TDM slots 5-8 — confirm against the NAU85L40
		 * datasheet.
		 */
		ctrl2_val = 4 * slot_width;
		ctrl4_val |= (tx_mask >> 4);
	} else {
		ctrl4_val |= tx_mask;
	}
	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL4,
		NAU8540_TDM_MODE | NAU8540_TDM_OFFSET_EN |
		NAU8540_TDM_TX_MASK, ctrl4_val);
	/* TDM uses both serial data outputs: enable DO12 and DO34. */
	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
		NAU8540_I2S_DO12_OE, NAU8540_I2S_DO12_OE);
	regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
		NAU8540_I2S_DO34_OE | NAU8540_I2S_TSLOT_L_MASK,
		NAU8540_I2S_DO34_OE | ctrl2_val);

	return 0;
}
481 481
482 482
/* DAI stream operations; clocking (sysclk/PLL) is configured through the
 * codec-level callbacks in nau8540_codec_driver instead.
 */
static const struct snd_soc_dai_ops nau8540_dai_ops = {
	.hw_params = nau8540_hw_params,
	.set_fmt = nau8540_set_fmt,
	.set_tdm_slot = nau8540_set_tdm_slot,
};
488 488
#define NAU8540_RATES SNDRV_PCM_RATE_8000_48000
#define NAU8540_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
	| SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)

/* Capture-only DAI: the NAU85L40 is a 4-channel ADC with no playback
 * path, hence only a .capture stream is declared.
 */
static struct snd_soc_dai_driver nau8540_dai = {
	.name = "nau8540-hifi",
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 4,
		.rates = NAU8540_RATES,
		.formats = NAU8540_FORMATS,
	},
	.ops = &nau8540_dai_ops,
};
504 504
505/** 505/**
@@ -513,320 +513,320 @@ static struct snd_soc_dai_driver nau8540_dai = {
513 * Returns 0 for success or negative error code. 513 * Returns 0 for success or negative error code.
514 */ 514 */
/* Compute FLL parameters (pre-scaler, ratio, MCLK source scaling, and
 * 10-bit integer / 16-bit fractional multipliers) so the FLL generates
 * freq_out = 256 * fs from the fll_in reference clock.
 *
 * Returns 0 for success or -EINVAL if no valid configuration exists.
 */
static int nau8540_calc_fll_param(unsigned int fll_in,
	unsigned int fs, struct nau8540_fll *fll_param)
{
	u64 fvco, fvco_max;
	unsigned int fref, i, fvco_sel;

	/* Ensure the reference clock frequency (FREF) is <= 13.5MHz by dividing
	 * freq_in by 1, 2, 4, or 8 using FLL pre-scalar.
	 * FREF = freq_in / NAU8540_FLL_REF_DIV_MASK
	 */
	for (i = 0; i < ARRAY_SIZE(fll_pre_scalar); i++) {
		fref = fll_in / fll_pre_scalar[i].param;
		if (fref <= NAU_FREF_MAX)
			break;
	}
	if (i == ARRAY_SIZE(fll_pre_scalar))
		return -EINVAL;
	fll_param->clk_ref_div = fll_pre_scalar[i].val;

	/* Choose the FLL ratio based on FREF; fll_ratio[] is scanned in
	 * order, taking the first entry whose threshold FREF reaches.
	 */
	for (i = 0; i < ARRAY_SIZE(fll_ratio); i++) {
		if (fref >= fll_ratio[i].param)
			break;
	}
	if (i == ARRAY_SIZE(fll_ratio))
		return -EINVAL;
	fll_param->ratio = fll_ratio[i].val;

	/* Calculate the frequency of DCO (FDCO) given freq_out = 256 * Fs.
	 * FDCO must be within the 90MHz - 124MHz or the FFL cannot be
	 * guaranteed across the full range of operation.
	 * FDCO = freq_out * 2 * mclk_src_scaling
	 */
	/* Search all MCLK scalings and keep the largest in-range FDCO. */
	fvco_max = 0;
	fvco_sel = ARRAY_SIZE(mclk_src_scaling);
	for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
		fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
		if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
			fvco_max < fvco) {
			fvco_max = fvco;
			fvco_sel = i;
		}
	}
	if (ARRAY_SIZE(mclk_src_scaling) == fvco_sel)
		return -EINVAL;
	fll_param->mclk_src = mclk_src_scaling[fvco_sel].val;

	/* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
	 * input based on FDCO, FREF and FLL ratio.
	 */
	fvco = div_u64(fvco_max << 16, fref * fll_param->ratio);
	fll_param->fll_int = (fvco >> 16) & 0x3FF;
	fll_param->fll_frac = fvco & 0xFFFF;
	return 0;
}
570 570
/* Write the computed FLL parameters into the hardware. When a fractional
 * part is present, the sigma-delta modulator and loop filter are enabled;
 * otherwise pure accumulator (integer) mode is selected.
 */
static void nau8540_fll_apply(struct regmap *regmap,
	struct nau8540_fll *fll_param)
{
	regmap_update_bits(regmap, NAU8540_REG_CLOCK_SRC,
		NAU8540_CLK_SRC_MASK | NAU8540_CLK_MCLK_SRC_MASK,
		NAU8540_CLK_SRC_MCLK | fll_param->mclk_src);
	regmap_update_bits(regmap, NAU8540_REG_FLL1,
		NAU8540_FLL_RATIO_MASK, fll_param->ratio);
	/* FLL 16-bit fractional input */
	regmap_write(regmap, NAU8540_REG_FLL2, fll_param->fll_frac);
	/* FLL 10-bit integer input */
	regmap_update_bits(regmap, NAU8540_REG_FLL3,
		NAU8540_FLL_INTEGER_MASK, fll_param->fll_int);
	/* FLL pre-scaler */
	regmap_update_bits(regmap, NAU8540_REG_FLL4,
		NAU8540_FLL_REF_DIV_MASK,
		fll_param->clk_ref_div << NAU8540_FLL_REF_DIV_SFT);
	/* Switch the FLL clock to the reference input while reconfiguring. */
	regmap_update_bits(regmap, NAU8540_REG_FLL5,
		NAU8540_FLL_CLK_SW_MASK, NAU8540_FLL_CLK_SW_REF);
	regmap_update_bits(regmap,
		NAU8540_REG_FLL6, NAU8540_DCO_EN, 0);
	if (fll_param->fll_frac) {
		/* Fractional mode: enable DAC, loop filter and SDM. */
		regmap_update_bits(regmap, NAU8540_REG_FLL5,
			NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
			NAU8540_FLL_FTR_SW_MASK,
			NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
			NAU8540_FLL_FTR_SW_FILTER);
		regmap_update_bits(regmap, NAU8540_REG_FLL6,
			NAU8540_SDM_EN, NAU8540_SDM_EN);
	} else {
		/* Integer mode: accumulator path, SDM off. */
		regmap_update_bits(regmap, NAU8540_REG_FLL5,
			NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
			NAU8540_FLL_FTR_SW_MASK, NAU8540_FLL_FTR_SW_ACCU);
		regmap_update_bits(regmap,
			NAU8540_REG_FLL6, NAU8540_SDM_EN, 0);
	}
}
608 608
/* freq_out must be 256*Fs in order to achieve the best performance */
/* Codec set_pll callback: select the FLL reference (MCLK, BCLK or FS),
 * compute and apply the FLL configuration for freq_in -> freq_out, then
 * switch the system clock onto the FLL's VCO output.
 *
 * Returns 0 on success, -EINVAL on a bad pll_id or unachievable clock.
 */
static int nau8540_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
	struct nau8540_fll fll_param;
	int ret, fs;

	switch (pll_id) {
	case NAU8540_CLK_FLL_MCLK:
		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_MCLK);
		break;

	case NAU8540_CLK_FLL_BLK:
		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_BLK);
		break;

	case NAU8540_CLK_FLL_FS:
		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_FS);
		break;

	default:
		dev_err(nau8540->dev, "Invalid clock id (%d)\n", pll_id);
		return -EINVAL;
	}
	dev_dbg(nau8540->dev, "Sysclk is %dHz and clock id is %d\n",
		freq_out, pll_id);

	/* freq_out is 256 * Fs by contract (see comment above). */
	fs = freq_out / 256;
	ret = nau8540_calc_fll_param(freq_in, fs, &fll_param);
	if (ret < 0) {
		dev_err(nau8540->dev, "Unsupported input clock %d\n", freq_in);
		return ret;
	}
	dev_dbg(nau8540->dev, "mclk_src=%x ratio=%x fll_frac=%x fll_int=%x clk_ref_div=%x\n",
		fll_param.mclk_src, fll_param.ratio, fll_param.fll_frac,
		fll_param.fll_int, fll_param.clk_ref_div);

	nau8540_fll_apply(nau8540->regmap, &fll_param);
	/* Allow the FLL to settle before switching the system clock to it. */
	mdelay(2);
	regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
		NAU8540_CLK_SRC_MASK, NAU8540_CLK_SRC_VCO);

	return 0;
}
657 657
/* Codec set_sysclk callback: choose between the external MCLK and the
 * internal DCO/VCO as system clock source.
 *
 * Returns 0 on success or -EINVAL for an unknown clk_id.
 */
static int nau8540_set_sysclk(struct snd_soc_codec *codec,
	int clk_id, int source, unsigned int freq, int dir)
{
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);

	switch (clk_id) {
	case NAU8540_CLK_DIS:
	case NAU8540_CLK_MCLK:
		/* External MCLK: route sysclk to MCLK and power down the DCO. */
		regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
			NAU8540_CLK_SRC_MASK, NAU8540_CLK_SRC_MCLK);
		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL6,
			NAU8540_DCO_EN, 0);
		break;

	case NAU8540_CLK_INTERNAL:
		/* Internal clock: enable the DCO first, then switch to VCO. */
		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL6,
			NAU8540_DCO_EN, NAU8540_DCO_EN);
		regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
			NAU8540_CLK_SRC_MASK, NAU8540_CLK_SRC_VCO);
		break;

	default:
		dev_err(nau8540->dev, "Invalid clock id (%d)\n", clk_id);
		return -EINVAL;
	}

	dev_dbg(nau8540->dev, "Sysclk is %dHz and clock id is %d\n",
		freq, clk_id);

	return 0;
}
689 689
/* Issue a software reset of the codec. The reset register is written
 * twice — presumably required by the part to latch the reset (matches
 * the vendor init sequence); TODO confirm against the NAU85L40 datasheet.
 */
static void nau8540_reset_chip(struct regmap *regmap)
{
	regmap_write(regmap, NAU8540_REG_SW_RESET, 0x00);
	regmap_write(regmap, NAU8540_REG_SW_RESET, 0x00);
}
695 695
/* One-time register setup at probe: power references, clock enables and
 * the default ADC oversampling rate.
 */
static void nau8540_init_regs(struct nau8540 *nau8540)
{
	struct regmap *regmap = nau8540->regmap;

	/* Enable Bias/VMID/VMID Tieoff */
	regmap_update_bits(regmap, NAU8540_REG_VMID_CTRL,
		NAU8540_VMID_EN | NAU8540_VMID_SEL_MASK,
		NAU8540_VMID_EN | (0x2 << NAU8540_VMID_SEL_SFT));
	regmap_update_bits(regmap, NAU8540_REG_REFERENCE,
		NAU8540_PRECHARGE_DIS | NAU8540_GLOBAL_BIAS_EN,
		NAU8540_PRECHARGE_DIS | NAU8540_GLOBAL_BIAS_EN);
	/* Let the references settle before powering the mic bias. */
	mdelay(2);
	regmap_update_bits(regmap, NAU8540_REG_MIC_BIAS,
		NAU8540_PU_PRE, NAU8540_PU_PRE);
	regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
		NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN,
		NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN);
	/* ADC OSR selection, CLK_ADC = Fs * OSR */
	regmap_update_bits(regmap, NAU8540_REG_ADC_SAMPLE_RATE,
		NAU8540_ADC_OSR_MASK, NAU8540_ADC_OSR_64);
}
717 717
/* Suspend: stop hardware access (cache-only) and mark the register cache
 * dirty so every register is rewritten by regcache_sync() on resume.
 */
static int __maybe_unused nau8540_suspend(struct snd_soc_codec *codec)
{
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);

	regcache_cache_only(nau8540->regmap, true);
	regcache_mark_dirty(nau8540->regmap);

	return 0;
}
727 727
/* Resume: re-enable hardware access and restore all cached registers. */
static int __maybe_unused nau8540_resume(struct snd_soc_codec *codec)
{
	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);

	regcache_cache_only(nau8540->regmap, false);
	regcache_sync(nau8540->regmap);

	return 0;
}
737 737
/* ASoC codec driver: clocking callbacks, PM hooks, and the controls /
 * DAPM widgets / routes defined earlier in this file.
 */
static struct snd_soc_codec_driver nau8540_codec_driver = {
	.set_sysclk = nau8540_set_sysclk,
	.set_pll = nau8540_set_pll,
	.suspend = nau8540_suspend,
	.resume = nau8540_resume,
	.suspend_bias_off = true,

	.component_driver = {
		.controls = nau8540_snd_controls,
		.num_controls = ARRAY_SIZE(nau8540_snd_controls),
		.dapm_widgets = nau8540_dapm_widgets,
		.num_dapm_widgets = ARRAY_SIZE(nau8540_dapm_widgets),
		.dapm_routes = nau8540_dapm_routes,
		.num_dapm_routes = ARRAY_SIZE(nau8540_dapm_routes),
	},
};
754 754
/* regmap description: 16-bit registers and values, with an rbtree cache
 * seeded from nau8540_reg_defaults so suspend/resume can resync state.
 */
static const struct regmap_config nau8540_regmap_config = {
	.val_bits = 16,
	.reg_bits = 16,

	.max_register = NAU8540_REG_MAX,
	.readable_reg = nau8540_readable_reg,
	.writeable_reg = nau8540_writeable_reg,
	.volatile_reg = nau8540_volatile_reg,

	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = nau8540_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(nau8540_reg_defaults),
};
768 768
/* I2C probe: allocate driver state (unless supplied via platform data),
 * set up the regmap, verify the device responds, reset and initialize
 * the chip, then register the codec.
 *
 * Returns 0 on success or a negative errno.
 */
static int nau8540_i2c_probe(struct i2c_client *i2c,
	const struct i2c_device_id *id)
{
	struct device *dev = &i2c->dev;
	struct nau8540 *nau8540 = dev_get_platdata(dev);
	int ret, value;

	if (!nau8540) {
		nau8540 = devm_kzalloc(dev, sizeof(*nau8540), GFP_KERNEL);
		if (!nau8540)
			return -ENOMEM;
	}
	i2c_set_clientdata(i2c, nau8540);

	nau8540->regmap = devm_regmap_init_i2c(i2c, &nau8540_regmap_config);
	if (IS_ERR(nau8540->regmap))
		return PTR_ERR(nau8540->regmap);
	/* The device-id read only confirms the chip answers on the bus;
	 * the value itself is not validated here.
	 */
	ret = regmap_read(nau8540->regmap, NAU8540_REG_I2C_DEVICE_ID, &value);
	if (ret < 0) {
		dev_err(dev, "Failed to read device id from the NAU85L40: %d\n",
			ret);
		return ret;
	}

	nau8540->dev = dev;
	nau8540_reset_chip(nau8540->regmap);
	nau8540_init_regs(nau8540);

	return snd_soc_register_codec(dev,
		&nau8540_codec_driver, &nau8540_dai, 1);
}
800 800
/* I2C remove: unregister the codec; all allocations are devm-managed. */
static int nau8540_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}
806 806
807 807
/* I2C device-id table for non-DT matching. */
static const struct i2c_device_id nau8540_i2c_ids[] = {
	{ "nau8540", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, nau8540_i2c_ids);
813 813
#ifdef CONFIG_OF
/* Device-tree match table; see the nuvoton,nau8540 binding. */
static const struct of_device_id nau8540_of_ids[] = {
	{ .compatible = "nuvoton,nau8540", },
	{}
};
MODULE_DEVICE_TABLE(of, nau8540_of_ids);
#endif
821 821
/* I2C driver glue; of_match_ptr() compiles to NULL without CONFIG_OF. */
static struct i2c_driver nau8540_i2c_driver = {
	.driver = {
		.name = "nau8540",
		.of_match_table = of_match_ptr(nau8540_of_ids),
	},
	.probe = nau8540_i2c_probe,
	.remove = nau8540_i2c_remove,
	.id_table = nau8540_i2c_ids,
};
module_i2c_driver(nau8540_i2c_driver);
832 832
diff --git a/sound/soc/codecs/nau8540.h b/sound/soc/codecs/nau8540.h
index d06e65188cd5..5db5b224944d 100644
--- a/sound/soc/codecs/nau8540.h
+++ b/sound/soc/codecs/nau8540.h
@@ -12,211 +12,211 @@
12#ifndef __NAU8540_H__ 12#ifndef __NAU8540_H__
13#define __NAU8540_H__ 13#define __NAU8540_H__
14 14
15#define NAU8540_REG_SW_RESET 0x00 15#define NAU8540_REG_SW_RESET 0x00
16#define NAU8540_REG_POWER_MANAGEMENT 0x01 16#define NAU8540_REG_POWER_MANAGEMENT 0x01
17#define NAU8540_REG_CLOCK_CTRL 0x02 17#define NAU8540_REG_CLOCK_CTRL 0x02
18#define NAU8540_REG_CLOCK_SRC 0x03 18#define NAU8540_REG_CLOCK_SRC 0x03
19#define NAU8540_REG_FLL1 0x04 19#define NAU8540_REG_FLL1 0x04
20#define NAU8540_REG_FLL2 0x05 20#define NAU8540_REG_FLL2 0x05
21#define NAU8540_REG_FLL3 0x06 21#define NAU8540_REG_FLL3 0x06
22#define NAU8540_REG_FLL4 0x07 22#define NAU8540_REG_FLL4 0x07
23#define NAU8540_REG_FLL5 0x08 23#define NAU8540_REG_FLL5 0x08
24#define NAU8540_REG_FLL6 0x09 24#define NAU8540_REG_FLL6 0x09
25#define NAU8540_REG_FLL_VCO_RSV 0x0A 25#define NAU8540_REG_FLL_VCO_RSV 0x0A
26#define NAU8540_REG_PCM_CTRL0 0x10 26#define NAU8540_REG_PCM_CTRL0 0x10
27#define NAU8540_REG_PCM_CTRL1 0x11 27#define NAU8540_REG_PCM_CTRL1 0x11
28#define NAU8540_REG_PCM_CTRL2 0x12 28#define NAU8540_REG_PCM_CTRL2 0x12
29#define NAU8540_REG_PCM_CTRL3 0x13 29#define NAU8540_REG_PCM_CTRL3 0x13
30#define NAU8540_REG_PCM_CTRL4 0x14 30#define NAU8540_REG_PCM_CTRL4 0x14
31#define NAU8540_REG_ALC_CONTROL_1 0x20 31#define NAU8540_REG_ALC_CONTROL_1 0x20
32#define NAU8540_REG_ALC_CONTROL_2 0x21 32#define NAU8540_REG_ALC_CONTROL_2 0x21
33#define NAU8540_REG_ALC_CONTROL_3 0x22 33#define NAU8540_REG_ALC_CONTROL_3 0x22
34#define NAU8540_REG_ALC_CONTROL_4 0x23 34#define NAU8540_REG_ALC_CONTROL_4 0x23
35#define NAU8540_REG_ALC_CONTROL_5 0x24 35#define NAU8540_REG_ALC_CONTROL_5 0x24
36#define NAU8540_REG_ALC_GAIN_CH12 0x2D 36#define NAU8540_REG_ALC_GAIN_CH12 0x2D
37#define NAU8540_REG_ALC_GAIN_CH34 0x2E 37#define NAU8540_REG_ALC_GAIN_CH34 0x2E
38#define NAU8540_REG_ALC_STATUS 0x2F 38#define NAU8540_REG_ALC_STATUS 0x2F
39#define NAU8540_REG_NOTCH_FIL1_CH1 0x30 39#define NAU8540_REG_NOTCH_FIL1_CH1 0x30
40#define NAU8540_REG_NOTCH_FIL2_CH1 0x31 40#define NAU8540_REG_NOTCH_FIL2_CH1 0x31
41#define NAU8540_REG_NOTCH_FIL1_CH2 0x32 41#define NAU8540_REG_NOTCH_FIL1_CH2 0x32
42#define NAU8540_REG_NOTCH_FIL2_CH2 0x33 42#define NAU8540_REG_NOTCH_FIL2_CH2 0x33
43#define NAU8540_REG_NOTCH_FIL1_CH3 0x34 43#define NAU8540_REG_NOTCH_FIL1_CH3 0x34
44#define NAU8540_REG_NOTCH_FIL2_CH3 0x35 44#define NAU8540_REG_NOTCH_FIL2_CH3 0x35
45#define NAU8540_REG_NOTCH_FIL1_CH4 0x36 45#define NAU8540_REG_NOTCH_FIL1_CH4 0x36
46#define NAU8540_REG_NOTCH_FIL2_CH4 0x37 46#define NAU8540_REG_NOTCH_FIL2_CH4 0x37
47#define NAU8540_REG_HPF_FILTER_CH12 0x38 47#define NAU8540_REG_HPF_FILTER_CH12 0x38
48#define NAU8540_REG_HPF_FILTER_CH34 0x39 48#define NAU8540_REG_HPF_FILTER_CH34 0x39
49#define NAU8540_REG_ADC_SAMPLE_RATE 0x3A 49#define NAU8540_REG_ADC_SAMPLE_RATE 0x3A
50#define NAU8540_REG_DIGITAL_GAIN_CH1 0x40 50#define NAU8540_REG_DIGITAL_GAIN_CH1 0x40
51#define NAU8540_REG_DIGITAL_GAIN_CH2 0x41 51#define NAU8540_REG_DIGITAL_GAIN_CH2 0x41
52#define NAU8540_REG_DIGITAL_GAIN_CH3 0x42 52#define NAU8540_REG_DIGITAL_GAIN_CH3 0x42
53#define NAU8540_REG_DIGITAL_GAIN_CH4 0x43 53#define NAU8540_REG_DIGITAL_GAIN_CH4 0x43
54#define NAU8540_REG_DIGITAL_MUX 0x44 54#define NAU8540_REG_DIGITAL_MUX 0x44
55#define NAU8540_REG_P2P_CH1 0x48 55#define NAU8540_REG_P2P_CH1 0x48
56#define NAU8540_REG_P2P_CH2 0x49 56#define NAU8540_REG_P2P_CH2 0x49
57#define NAU8540_REG_P2P_CH3 0x4A 57#define NAU8540_REG_P2P_CH3 0x4A
58#define NAU8540_REG_P2P_CH4 0x4B 58#define NAU8540_REG_P2P_CH4 0x4B
59#define NAU8540_REG_PEAK_CH1 0x4C 59#define NAU8540_REG_PEAK_CH1 0x4C
60#define NAU8540_REG_PEAK_CH2 0x4D 60#define NAU8540_REG_PEAK_CH2 0x4D
61#define NAU8540_REG_PEAK_CH3 0x4E 61#define NAU8540_REG_PEAK_CH3 0x4E
62#define NAU8540_REG_PEAK_CH4 0x4F 62#define NAU8540_REG_PEAK_CH4 0x4F
63#define NAU8540_REG_GPIO_CTRL 0x50 63#define NAU8540_REG_GPIO_CTRL 0x50
64#define NAU8540_REG_MISC_CTRL 0x51 64#define NAU8540_REG_MISC_CTRL 0x51
65#define NAU8540_REG_I2C_CTRL 0x52 65#define NAU8540_REG_I2C_CTRL 0x52
66#define NAU8540_REG_I2C_DEVICE_ID 0x58 66#define NAU8540_REG_I2C_DEVICE_ID 0x58
67#define NAU8540_REG_RST 0x5A 67#define NAU8540_REG_RST 0x5A
68#define NAU8540_REG_VMID_CTRL 0x60 68#define NAU8540_REG_VMID_CTRL 0x60
69#define NAU8540_REG_MUTE 0x61 69#define NAU8540_REG_MUTE 0x61
70#define NAU8540_REG_ANALOG_ADC1 0x64 70#define NAU8540_REG_ANALOG_ADC1 0x64
71#define NAU8540_REG_ANALOG_ADC2 0x65 71#define NAU8540_REG_ANALOG_ADC2 0x65
72#define NAU8540_REG_ANALOG_PWR 0x66 72#define NAU8540_REG_ANALOG_PWR 0x66
73#define NAU8540_REG_MIC_BIAS 0x67 73#define NAU8540_REG_MIC_BIAS 0x67
74#define NAU8540_REG_REFERENCE 0x68 74#define NAU8540_REG_REFERENCE 0x68
75#define NAU8540_REG_FEPGA1 0x69 75#define NAU8540_REG_FEPGA1 0x69
76#define NAU8540_REG_FEPGA2 0x6A 76#define NAU8540_REG_FEPGA2 0x6A
77#define NAU8540_REG_FEPGA3 0x6B 77#define NAU8540_REG_FEPGA3 0x6B
78#define NAU8540_REG_FEPGA4 0x6C 78#define NAU8540_REG_FEPGA4 0x6C
79#define NAU8540_REG_PWR 0x6D 79#define NAU8540_REG_PWR 0x6D
80#define NAU8540_REG_MAX NAU8540_REG_PWR 80#define NAU8540_REG_MAX NAU8540_REG_PWR
81 81
82 82
83/* POWER_MANAGEMENT (0x01) */ 83/* POWER_MANAGEMENT (0x01) */
84#define NAU8540_ADC4_EN (0x1 << 3) 84#define NAU8540_ADC4_EN (0x1 << 3)
85#define NAU8540_ADC3_EN (0x1 << 2) 85#define NAU8540_ADC3_EN (0x1 << 2)
86#define NAU8540_ADC2_EN (0x1 << 1) 86#define NAU8540_ADC2_EN (0x1 << 1)
87#define NAU8540_ADC1_EN 0x1 87#define NAU8540_ADC1_EN 0x1
88 88
89/* CLOCK_CTRL (0x02) */ 89/* CLOCK_CTRL (0x02) */
90#define NAU8540_CLK_ADC_EN (0x1 << 15) 90#define NAU8540_CLK_ADC_EN (0x1 << 15)
91#define NAU8540_CLK_I2S_EN (0x1 << 1) 91#define NAU8540_CLK_I2S_EN (0x1 << 1)
92 92
93/* CLOCK_SRC (0x03) */ 93/* CLOCK_SRC (0x03) */
94#define NAU8540_CLK_SRC_SFT 15 94#define NAU8540_CLK_SRC_SFT 15
95#define NAU8540_CLK_SRC_MASK (1 << NAU8540_CLK_SRC_SFT) 95#define NAU8540_CLK_SRC_MASK (1 << NAU8540_CLK_SRC_SFT)
96#define NAU8540_CLK_SRC_VCO (1 << NAU8540_CLK_SRC_SFT) 96#define NAU8540_CLK_SRC_VCO (1 << NAU8540_CLK_SRC_SFT)
97#define NAU8540_CLK_SRC_MCLK (0 << NAU8540_CLK_SRC_SFT) 97#define NAU8540_CLK_SRC_MCLK (0 << NAU8540_CLK_SRC_SFT)
98#define NAU8540_CLK_ADC_SRC_SFT 6 98#define NAU8540_CLK_ADC_SRC_SFT 6
99#define NAU8540_CLK_ADC_SRC_MASK (0x3 << NAU8540_CLK_ADC_SRC_SFT) 99#define NAU8540_CLK_ADC_SRC_MASK (0x3 << NAU8540_CLK_ADC_SRC_SFT)
100#define NAU8540_CLK_MCLK_SRC_MASK 0xf 100#define NAU8540_CLK_MCLK_SRC_MASK 0xf
101 101
102/* FLL1 (0x04) */ 102/* FLL1 (0x04) */
103#define NAU8540_FLL_RATIO_MASK 0x7f 103#define NAU8540_FLL_RATIO_MASK 0x7f
104 104
105/* FLL3 (0x06) */ 105/* FLL3 (0x06) */
106#define NAU8540_FLL_CLK_SRC_SFT 10 106#define NAU8540_FLL_CLK_SRC_SFT 10
107#define NAU8540_FLL_CLK_SRC_MASK (0x3 << NAU8540_FLL_CLK_SRC_SFT) 107#define NAU8540_FLL_CLK_SRC_MASK (0x3 << NAU8540_FLL_CLK_SRC_SFT)
108#define NAU8540_FLL_CLK_SRC_MCLK (0 << NAU8540_FLL_CLK_SRC_SFT) 108#define NAU8540_FLL_CLK_SRC_MCLK (0 << NAU8540_FLL_CLK_SRC_SFT)
109#define NAU8540_FLL_CLK_SRC_BLK (0x2 << NAU8540_FLL_CLK_SRC_SFT) 109#define NAU8540_FLL_CLK_SRC_BLK (0x2 << NAU8540_FLL_CLK_SRC_SFT)
110#define NAU8540_FLL_CLK_SRC_FS (0x3 << NAU8540_FLL_CLK_SRC_SFT) 110#define NAU8540_FLL_CLK_SRC_FS (0x3 << NAU8540_FLL_CLK_SRC_SFT)
111#define NAU8540_FLL_INTEGER_MASK 0x3ff 111#define NAU8540_FLL_INTEGER_MASK 0x3ff
112 112
113/* FLL4 (0x07) */ 113/* FLL4 (0x07) */
114#define NAU8540_FLL_REF_DIV_SFT 10 114#define NAU8540_FLL_REF_DIV_SFT 10
115#define NAU8540_FLL_REF_DIV_MASK (0x3 << NAU8540_FLL_REF_DIV_SFT) 115#define NAU8540_FLL_REF_DIV_MASK (0x3 << NAU8540_FLL_REF_DIV_SFT)
116 116
117/* FLL5 (0x08) */ 117/* FLL5 (0x08) */
118#define NAU8540_FLL_PDB_DAC_EN (0x1 << 15) 118#define NAU8540_FLL_PDB_DAC_EN (0x1 << 15)
119#define NAU8540_FLL_LOOP_FTR_EN (0x1 << 14) 119#define NAU8540_FLL_LOOP_FTR_EN (0x1 << 14)
120#define NAU8540_FLL_CLK_SW_MASK (0x1 << 13) 120#define NAU8540_FLL_CLK_SW_MASK (0x1 << 13)
121#define NAU8540_FLL_CLK_SW_N2 (0x1 << 13) 121#define NAU8540_FLL_CLK_SW_N2 (0x1 << 13)
122#define NAU8540_FLL_CLK_SW_REF (0x0 << 13) 122#define NAU8540_FLL_CLK_SW_REF (0x0 << 13)
123#define NAU8540_FLL_FTR_SW_MASK (0x1 << 12) 123#define NAU8540_FLL_FTR_SW_MASK (0x1 << 12)
124#define NAU8540_FLL_FTR_SW_ACCU (0x1 << 12) 124#define NAU8540_FLL_FTR_SW_ACCU (0x1 << 12)
125#define NAU8540_FLL_FTR_SW_FILTER (0x0 << 12) 125#define NAU8540_FLL_FTR_SW_FILTER (0x0 << 12)
126 126
127/* FLL6 (0x9) */ 127/* FLL6 (0x9) */
128#define NAU8540_DCO_EN (0x1 << 15) 128#define NAU8540_DCO_EN (0x1 << 15)
129#define NAU8540_SDM_EN (0x1 << 14) 129#define NAU8540_SDM_EN (0x1 << 14)
130 130
131/* PCM_CTRL0 (0x10) */ 131/* PCM_CTRL0 (0x10) */
132#define NAU8540_I2S_BP_SFT 7 132#define NAU8540_I2S_BP_SFT 7
133#define NAU8540_I2S_BP_INV (0x1 << NAU8540_I2S_BP_SFT) 133#define NAU8540_I2S_BP_INV (0x1 << NAU8540_I2S_BP_SFT)
134#define NAU8540_I2S_PCMB_SFT 6 134#define NAU8540_I2S_PCMB_SFT 6
135#define NAU8540_I2S_PCMB_EN (0x1 << NAU8540_I2S_PCMB_SFT) 135#define NAU8540_I2S_PCMB_EN (0x1 << NAU8540_I2S_PCMB_SFT)
136#define NAU8540_I2S_DL_SFT 2 136#define NAU8540_I2S_DL_SFT 2
137#define NAU8540_I2S_DL_MASK (0x3 << NAU8540_I2S_DL_SFT) 137#define NAU8540_I2S_DL_MASK (0x3 << NAU8540_I2S_DL_SFT)
138#define NAU8540_I2S_DL_16 (0 << NAU8540_I2S_DL_SFT) 138#define NAU8540_I2S_DL_16 (0 << NAU8540_I2S_DL_SFT)
139#define NAU8540_I2S_DL_20 (0x1 << NAU8540_I2S_DL_SFT) 139#define NAU8540_I2S_DL_20 (0x1 << NAU8540_I2S_DL_SFT)
140#define NAU8540_I2S_DL_24 (0x2 << NAU8540_I2S_DL_SFT) 140#define NAU8540_I2S_DL_24 (0x2 << NAU8540_I2S_DL_SFT)
141#define NAU8540_I2S_DL_32 (0x3 << NAU8540_I2S_DL_SFT) 141#define NAU8540_I2S_DL_32 (0x3 << NAU8540_I2S_DL_SFT)
142#define NAU8540_I2S_DF_MASK 0x3 142#define NAU8540_I2S_DF_MASK 0x3
143#define NAU8540_I2S_DF_RIGTH 0 143#define NAU8540_I2S_DF_RIGTH 0
144#define NAU8540_I2S_DF_LEFT 0x1 144#define NAU8540_I2S_DF_LEFT 0x1
145#define NAU8540_I2S_DF_I2S 0x2 145#define NAU8540_I2S_DF_I2S 0x2
146#define NAU8540_I2S_DF_PCM_AB 0x3 146#define NAU8540_I2S_DF_PCM_AB 0x3
147 147
148/* PCM_CTRL1 (0x11) */ 148/* PCM_CTRL1 (0x11) */
149#define NAU8540_I2S_LRC_DIV_SFT 12 149#define NAU8540_I2S_LRC_DIV_SFT 12
150#define NAU8540_I2S_LRC_DIV_MASK (0x3 << NAU8540_I2S_LRC_DIV_SFT) 150#define NAU8540_I2S_LRC_DIV_MASK (0x3 << NAU8540_I2S_LRC_DIV_SFT)
151#define NAU8540_I2S_DO12_OE (0x1 << 4) 151#define NAU8540_I2S_DO12_OE (0x1 << 4)
152#define NAU8540_I2S_MS_SFT 3 152#define NAU8540_I2S_MS_SFT 3
153#define NAU8540_I2S_MS_MASK (0x1 << NAU8540_I2S_MS_SFT) 153#define NAU8540_I2S_MS_MASK (0x1 << NAU8540_I2S_MS_SFT)
154#define NAU8540_I2S_MS_MASTER (0x1 << NAU8540_I2S_MS_SFT) 154#define NAU8540_I2S_MS_MASTER (0x1 << NAU8540_I2S_MS_SFT)
155#define NAU8540_I2S_MS_SLAVE (0x0 << NAU8540_I2S_MS_SFT) 155#define NAU8540_I2S_MS_SLAVE (0x0 << NAU8540_I2S_MS_SFT)
156#define NAU8540_I2S_BLK_DIV_MASK 0x7 156#define NAU8540_I2S_BLK_DIV_MASK 0x7
157 157
/* PCM_CTRL2 (0x12) */
159#define NAU8540_I2S_DO34_OE (0x1 << 11) 159#define NAU8540_I2S_DO34_OE (0x1 << 11)
160#define NAU8540_I2S_TSLOT_L_MASK 0x3ff 160#define NAU8540_I2S_TSLOT_L_MASK 0x3ff
161 161
162/* PCM_CTRL4 (0x14) */ 162/* PCM_CTRL4 (0x14) */
163#define NAU8540_TDM_MODE (0x1 << 15) 163#define NAU8540_TDM_MODE (0x1 << 15)
164#define NAU8540_TDM_OFFSET_EN (0x1 << 14) 164#define NAU8540_TDM_OFFSET_EN (0x1 << 14)
165#define NAU8540_TDM_TX_MASK 0xf 165#define NAU8540_TDM_TX_MASK 0xf
166 166
167/* ADC_SAMPLE_RATE (0x3A) */ 167/* ADC_SAMPLE_RATE (0x3A) */
168#define NAU8540_ADC_OSR_MASK 0x3 168#define NAU8540_ADC_OSR_MASK 0x3
169#define NAU8540_ADC_OSR_256 0x3 169#define NAU8540_ADC_OSR_256 0x3
170#define NAU8540_ADC_OSR_128 0x2 170#define NAU8540_ADC_OSR_128 0x2
171#define NAU8540_ADC_OSR_64 0x1 171#define NAU8540_ADC_OSR_64 0x1
172#define NAU8540_ADC_OSR_32 0x0 172#define NAU8540_ADC_OSR_32 0x0
173 173
174/* VMID_CTRL (0x60) */ 174/* VMID_CTRL (0x60) */
175#define NAU8540_VMID_EN (1 << 6) 175#define NAU8540_VMID_EN (1 << 6)
176#define NAU8540_VMID_SEL_SFT 4 176#define NAU8540_VMID_SEL_SFT 4
177#define NAU8540_VMID_SEL_MASK (0x3 << NAU8540_VMID_SEL_SFT) 177#define NAU8540_VMID_SEL_MASK (0x3 << NAU8540_VMID_SEL_SFT)
178 178
179/* MIC_BIAS (0x67) */ 179/* MIC_BIAS (0x67) */
180#define NAU8540_PU_PRE (0x1 << 8) 180#define NAU8540_PU_PRE (0x1 << 8)
181 181
182/* REFERENCE (0x68) */ 182/* REFERENCE (0x68) */
183#define NAU8540_PRECHARGE_DIS (0x1 << 13) 183#define NAU8540_PRECHARGE_DIS (0x1 << 13)
184#define NAU8540_GLOBAL_BIAS_EN (0x1 << 12) 184#define NAU8540_GLOBAL_BIAS_EN (0x1 << 12)
185 185
186 186
/*
 * System Clock Source selectors passed to the driver's sysclk
 * configuration (values map to the choices handled by the .c file).
 */
enum {
	NAU8540_CLK_DIS,	/* clock disabled */
	NAU8540_CLK_MCLK,	/* external MCLK */
	NAU8540_CLK_INTERNAL,	/* internal clock (VCO) */
	NAU8540_CLK_FLL_MCLK,	/* FLL referenced from MCLK */
	NAU8540_CLK_FLL_BLK,	/* FLL referenced from bit clock */
	NAU8540_CLK_FLL_FS,	/* FLL referenced from frame sync */
};
196 196
/* Per-device driver state kept in i2c client drvdata. */
struct nau8540 {
	struct device *dev;	/* backing I2C device, for logging */
	struct regmap *regmap;	/* register map for all HW access */
};
201 201
/* Computed FLL configuration written into NAU8540_REG_FLL1..FLL5. */
struct nau8540_fll {
	int mclk_src;	/* reference clock source selector */
	int ratio;	/* FLL ratio (NAU8540_FLL_RATIO_MASK field) */
	int fll_frac;	/* fractional part of the feedback divider */
	int fll_int;	/* integer part of the feedback divider */
	int clk_ref_div;	/* reference clock pre-divider */
};
209 209
/* Lookup-table entry mapping an FLL parameter to its register value. */
struct nau8540_fll_attr {
	unsigned int param;	/* parameter (e.g. ratio or divider) */
	unsigned int val;	/* corresponding register field value */
};
214 214
/* over sampling rate: maps an OSR choice to its ADC clock source value */
struct nau8540_osr_attr {
	unsigned int osr;	/* oversampling ratio (e.g. 32..256) */
	unsigned int clk_src;	/* matching CLK_ADC source divider value */
};
220 220
221 221
222#endif /* __NAU8540_H__ */ 222#endif /* __NAU8540_H__ */
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
new file mode 100644
index 000000000000..cca974d26136
--- /dev/null
+++ b/sound/soc/codecs/nau8824.c
@@ -0,0 +1,1831 @@
1/*
2 * NAU88L24 ALSA SoC audio driver
3 *
4 * Copyright 2016 Nuvoton Technology Corp.
5 * Author: John Hsu <KCHSU0@nuvoton.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/init.h>
15#include <linux/i2c.h>
16#include <linux/regmap.h>
17#include <linux/slab.h>
18#include <linux/clk.h>
19#include <linux/acpi.h>
20#include <linux/math64.h>
21#include <linux/semaphore.h>
22
23#include <sound/initval.h>
24#include <sound/tlv.h>
25#include <sound/core.h>
26#include <sound/pcm.h>
27#include <sound/pcm_params.h>
28#include <sound/soc.h>
29#include <sound/jack.h>
30
31#include "nau8824.h"
32
33
34static int nau8824_config_sysclk(struct nau8824 *nau8824,
35 int clk_id, unsigned int freq);
36static bool nau8824_is_jack_inserted(struct nau8824 *nau8824);
37
/* the maximum frequency of the DMIC clock */
39#define DMIC_CLK 3072000
40
41/* the ADC threshold of headset */
42#define HEADSET_SARADC_THD 0x80
43
44/* the parameter threshold of FLL */
45#define NAU_FREF_MAX 13500000
46#define NAU_FVCO_MAX 124000000
47#define NAU_FVCO_MIN 90000000
48
49/* scaling for mclk from sysclk_src output */
50static const struct nau8824_fll_attr mclk_src_scaling[] = {
51 { 1, 0x0 },
52 { 2, 0x2 },
53 { 4, 0x3 },
54 { 8, 0x4 },
55 { 16, 0x5 },
56 { 32, 0x6 },
57 { 3, 0x7 },
58 { 6, 0xa },
59 { 12, 0xb },
60 { 24, 0xc },
61};
62
63/* ratio for input clk freq */
64static const struct nau8824_fll_attr fll_ratio[] = {
65 { 512000, 0x01 },
66 { 256000, 0x02 },
67 { 128000, 0x04 },
68 { 64000, 0x08 },
69 { 32000, 0x10 },
70 { 8000, 0x20 },
71 { 4000, 0x40 },
72};
73
74static const struct nau8824_fll_attr fll_pre_scalar[] = {
75 { 1, 0x0 },
76 { 2, 0x1 },
77 { 4, 0x2 },
78 { 8, 0x3 },
79};
80
81/* the maximum frequency of CLK_ADC and CLK_DAC */
82#define CLK_DA_AD_MAX 6144000
83
84/* over sampling rate */
85static const struct nau8824_osr_attr osr_dac_sel[] = {
86 { 64, 2 }, /* OSR 64, SRC 1/4 */
87 { 256, 0 }, /* OSR 256, SRC 1 */
88 { 128, 1 }, /* OSR 128, SRC 1/2 */
89 { 0, 0 },
90 { 32, 3 }, /* OSR 32, SRC 1/8 */
91};
92
93static const struct nau8824_osr_attr osr_adc_sel[] = {
94 { 32, 3 }, /* OSR 32, SRC 1/8 */
95 { 64, 2 }, /* OSR 64, SRC 1/4 */
96 { 128, 1 }, /* OSR 128, SRC 1/2 */
97 { 256, 0 }, /* OSR 256, SRC 1 */
98};
99
100static const struct reg_default nau8824_reg_defaults[] = {
101 { NAU8824_REG_ENA_CTRL, 0x0000 },
102 { NAU8824_REG_CLK_GATING_ENA, 0x0000 },
103 { NAU8824_REG_CLK_DIVIDER, 0x0000 },
104 { NAU8824_REG_FLL1, 0x0000 },
105 { NAU8824_REG_FLL2, 0x3126 },
106 { NAU8824_REG_FLL3, 0x0008 },
107 { NAU8824_REG_FLL4, 0x0010 },
108 { NAU8824_REG_FLL5, 0xC000 },
109 { NAU8824_REG_FLL6, 0x6000 },
110 { NAU8824_REG_FLL_VCO_RSV, 0xF13C },
111 { NAU8824_REG_JACK_DET_CTRL, 0x0000 },
112 { NAU8824_REG_INTERRUPT_SETTING_1, 0x0000 },
113 { NAU8824_REG_IRQ, 0x0000 },
114 { NAU8824_REG_CLEAR_INT_REG, 0x0000 },
115 { NAU8824_REG_INTERRUPT_SETTING, 0x1000 },
116 { NAU8824_REG_SAR_ADC, 0x0015 },
117 { NAU8824_REG_VDET_COEFFICIENT, 0x0110 },
118 { NAU8824_REG_VDET_THRESHOLD_1, 0x0000 },
119 { NAU8824_REG_VDET_THRESHOLD_2, 0x0000 },
120 { NAU8824_REG_VDET_THRESHOLD_3, 0x0000 },
121 { NAU8824_REG_VDET_THRESHOLD_4, 0x0000 },
122 { NAU8824_REG_GPIO_SEL, 0x0000 },
123 { NAU8824_REG_PORT0_I2S_PCM_CTRL_1, 0x000B },
124 { NAU8824_REG_PORT0_I2S_PCM_CTRL_2, 0x0010 },
125 { NAU8824_REG_PORT0_LEFT_TIME_SLOT, 0x0000 },
126 { NAU8824_REG_PORT0_RIGHT_TIME_SLOT, 0x0000 },
127 { NAU8824_REG_TDM_CTRL, 0x0000 },
128 { NAU8824_REG_ADC_HPF_FILTER, 0x0000 },
129 { NAU8824_REG_ADC_FILTER_CTRL, 0x0002 },
130 { NAU8824_REG_DAC_FILTER_CTRL_1, 0x0000 },
131 { NAU8824_REG_DAC_FILTER_CTRL_2, 0x0000 },
132 { NAU8824_REG_NOTCH_FILTER_1, 0x0000 },
133 { NAU8824_REG_NOTCH_FILTER_2, 0x0000 },
134 { NAU8824_REG_EQ1_LOW, 0x112C },
135 { NAU8824_REG_EQ2_EQ3, 0x2C2C },
136 { NAU8824_REG_EQ4_EQ5, 0x2C2C },
137 { NAU8824_REG_ADC_CH0_DGAIN_CTRL, 0x0100 },
138 { NAU8824_REG_ADC_CH1_DGAIN_CTRL, 0x0100 },
139 { NAU8824_REG_ADC_CH2_DGAIN_CTRL, 0x0100 },
140 { NAU8824_REG_ADC_CH3_DGAIN_CTRL, 0x0100 },
141 { NAU8824_REG_DAC_MUTE_CTRL, 0x0000 },
142 { NAU8824_REG_DAC_CH0_DGAIN_CTRL, 0x0100 },
143 { NAU8824_REG_DAC_CH1_DGAIN_CTRL, 0x0100 },
144 { NAU8824_REG_ADC_TO_DAC_ST, 0x0000 },
145 { NAU8824_REG_DRC_KNEE_IP12_ADC_CH01, 0x1486 },
146 { NAU8824_REG_DRC_KNEE_IP34_ADC_CH01, 0x0F12 },
147 { NAU8824_REG_DRC_SLOPE_ADC_CH01, 0x25FF },
148 { NAU8824_REG_DRC_ATKDCY_ADC_CH01, 0x3457 },
149 { NAU8824_REG_DRC_KNEE_IP12_ADC_CH23, 0x1486 },
150 { NAU8824_REG_DRC_KNEE_IP34_ADC_CH23, 0x0F12 },
151 { NAU8824_REG_DRC_SLOPE_ADC_CH23, 0x25FF },
152 { NAU8824_REG_DRC_ATKDCY_ADC_CH23, 0x3457 },
153 { NAU8824_REG_DRC_GAINL_ADC0, 0x0200 },
154 { NAU8824_REG_DRC_GAINL_ADC1, 0x0200 },
155 { NAU8824_REG_DRC_GAINL_ADC2, 0x0200 },
156 { NAU8824_REG_DRC_GAINL_ADC3, 0x0200 },
157 { NAU8824_REG_DRC_KNEE_IP12_DAC, 0x1486 },
158 { NAU8824_REG_DRC_KNEE_IP34_DAC, 0x0F12 },
159 { NAU8824_REG_DRC_SLOPE_DAC, 0x25F9 },
160 { NAU8824_REG_DRC_ATKDCY_DAC, 0x3457 },
161 { NAU8824_REG_DRC_GAIN_DAC_CH0, 0x0200 },
162 { NAU8824_REG_DRC_GAIN_DAC_CH1, 0x0200 },
163 { NAU8824_REG_MODE, 0x0000 },
164 { NAU8824_REG_MODE1, 0x0000 },
165 { NAU8824_REG_MODE2, 0x0000 },
166 { NAU8824_REG_CLASSG, 0x0000 },
167 { NAU8824_REG_OTP_EFUSE, 0x0000 },
168 { NAU8824_REG_OTPDOUT_1, 0x0000 },
169 { NAU8824_REG_OTPDOUT_2, 0x0000 },
170 { NAU8824_REG_MISC_CTRL, 0x0000 },
171 { NAU8824_REG_I2C_TIMEOUT, 0xEFFF },
172 { NAU8824_REG_TEST_MODE, 0x0000 },
173 { NAU8824_REG_I2C_DEVICE_ID, 0x1AF1 },
174 { NAU8824_REG_SAR_ADC_DATA_OUT, 0x00FF },
175 { NAU8824_REG_BIAS_ADJ, 0x0000 },
176 { NAU8824_REG_PGA_GAIN, 0x0000 },
177 { NAU8824_REG_TRIM_SETTINGS, 0x0000 },
178 { NAU8824_REG_ANALOG_CONTROL_1, 0x0000 },
179 { NAU8824_REG_ANALOG_CONTROL_2, 0x0000 },
180 { NAU8824_REG_ENABLE_LO, 0x0000 },
181 { NAU8824_REG_GAIN_LO, 0x0000 },
182 { NAU8824_REG_CLASSD_GAIN_1, 0x0000 },
183 { NAU8824_REG_CLASSD_GAIN_2, 0x0000 },
184 { NAU8824_REG_ANALOG_ADC_1, 0x0011 },
185 { NAU8824_REG_ANALOG_ADC_2, 0x0020 },
186 { NAU8824_REG_RDAC, 0x0008 },
187 { NAU8824_REG_MIC_BIAS, 0x0006 },
188 { NAU8824_REG_HS_VOLUME_CONTROL, 0x0000 },
189 { NAU8824_REG_BOOST, 0x0000 },
190 { NAU8824_REG_FEPGA, 0x0000 },
191 { NAU8824_REG_FEPGA_II, 0x0000 },
192 { NAU8824_REG_FEPGA_SE, 0x0000 },
193 { NAU8824_REG_FEPGA_ATTENUATION, 0x0000 },
194 { NAU8824_REG_ATT_PORT0, 0x0000 },
195 { NAU8824_REG_ATT_PORT1, 0x0000 },
196 { NAU8824_REG_POWER_UP_CONTROL, 0x0000 },
197 { NAU8824_REG_CHARGE_PUMP_CONTROL, 0x0300 },
198 { NAU8824_REG_CHARGE_PUMP_INPUT, 0x0013 },
199};
200
201static int nau8824_sema_acquire(struct nau8824 *nau8824, long timeout)
202{
203 int ret;
204
205 if (timeout) {
206 ret = down_timeout(&nau8824->jd_sem, timeout);
207 if (ret < 0)
208 dev_warn(nau8824->dev, "Acquire semaphone timeout\n");
209 } else {
210 ret = down_interruptible(&nau8824->jd_sem);
211 if (ret < 0)
212 dev_warn(nau8824->dev, "Acquire semaphone fail\n");
213 }
214
215 return ret;
216}
217
/* Release the jack-detection semaphore taken by nau8824_sema_acquire(). */
static inline void nau8824_sema_release(struct nau8824 *nau8824)
{
	up(&nau8824->jd_sem);
}
222
/*
 * regmap .readable_reg callback: true for every register address that
 * may be read over I2C, expressed as inclusive case ranges.
 */
static bool nau8824_readable_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case NAU8824_REG_ENA_CTRL ... NAU8824_REG_FLL_VCO_RSV:
	case NAU8824_REG_JACK_DET_CTRL:
	case NAU8824_REG_INTERRUPT_SETTING_1:
	case NAU8824_REG_IRQ:
	case NAU8824_REG_CLEAR_INT_REG ... NAU8824_REG_VDET_THRESHOLD_4:
	case NAU8824_REG_GPIO_SEL:
	case NAU8824_REG_PORT0_I2S_PCM_CTRL_1 ... NAU8824_REG_TDM_CTRL:
	case NAU8824_REG_ADC_HPF_FILTER ... NAU8824_REG_EQ4_EQ5:
	case NAU8824_REG_ADC_CH0_DGAIN_CTRL ... NAU8824_REG_ADC_TO_DAC_ST:
	case NAU8824_REG_DRC_KNEE_IP12_ADC_CH01 ... NAU8824_REG_DRC_GAINL_ADC3:
	case NAU8824_REG_DRC_KNEE_IP12_DAC ... NAU8824_REG_DRC_GAIN_DAC_CH1:
	case NAU8824_REG_CLASSG ... NAU8824_REG_OTP_EFUSE:
	case NAU8824_REG_OTPDOUT_1 ... NAU8824_REG_OTPDOUT_2:
	case NAU8824_REG_I2C_TIMEOUT:
	case NAU8824_REG_I2C_DEVICE_ID ... NAU8824_REG_SAR_ADC_DATA_OUT:
	case NAU8824_REG_BIAS_ADJ ... NAU8824_REG_CLASSD_GAIN_2:
	case NAU8824_REG_ANALOG_ADC_1 ... NAU8824_REG_ATT_PORT1:
	case NAU8824_REG_POWER_UP_CONTROL ... NAU8824_REG_CHARGE_PUMP_INPUT:
		return true;
	default:
		return false;
	}

}
250
/*
 * regmap .writeable_reg callback: true for registers that may be
 * written.  Note this list starts at NAU8824_REG_RESET and excludes
 * read-only status registers (IRQ, OTPDOUT, device ID, SAR ADC data,
 * DRC gain readbacks, charge-pump input) that the readable table allows.
 */
static bool nau8824_writeable_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case NAU8824_REG_RESET ... NAU8824_REG_FLL_VCO_RSV:
	case NAU8824_REG_JACK_DET_CTRL:
	case NAU8824_REG_INTERRUPT_SETTING_1:
	case NAU8824_REG_CLEAR_INT_REG ... NAU8824_REG_VDET_THRESHOLD_4:
	case NAU8824_REG_GPIO_SEL:
	case NAU8824_REG_PORT0_I2S_PCM_CTRL_1 ... NAU8824_REG_TDM_CTRL:
	case NAU8824_REG_ADC_HPF_FILTER ... NAU8824_REG_EQ4_EQ5:
	case NAU8824_REG_ADC_CH0_DGAIN_CTRL ... NAU8824_REG_ADC_TO_DAC_ST:
	case NAU8824_REG_DRC_KNEE_IP12_ADC_CH01:
	case NAU8824_REG_DRC_KNEE_IP34_ADC_CH01:
	case NAU8824_REG_DRC_SLOPE_ADC_CH01:
	case NAU8824_REG_DRC_ATKDCY_ADC_CH01:
	case NAU8824_REG_DRC_KNEE_IP12_ADC_CH23:
	case NAU8824_REG_DRC_KNEE_IP34_ADC_CH23:
	case NAU8824_REG_DRC_SLOPE_ADC_CH23:
	case NAU8824_REG_DRC_ATKDCY_ADC_CH23:
	case NAU8824_REG_DRC_KNEE_IP12_DAC ... NAU8824_REG_DRC_ATKDCY_DAC:
	case NAU8824_REG_CLASSG ... NAU8824_REG_OTP_EFUSE:
	case NAU8824_REG_I2C_TIMEOUT:
	case NAU8824_REG_BIAS_ADJ ... NAU8824_REG_CLASSD_GAIN_2:
	case NAU8824_REG_ANALOG_ADC_1 ... NAU8824_REG_ATT_PORT1:
	case NAU8824_REG_POWER_UP_CONTROL ... NAU8824_REG_CHARGE_PUMP_CONTROL:
		return true;
	default:
		return false;
	}
}
281
/*
 * regmap .volatile_reg callback: registers whose value can change
 * behind the driver's back (hardware status, readbacks), so they must
 * never be served from the regmap cache.
 */
static bool nau8824_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case NAU8824_REG_RESET:
	case NAU8824_REG_IRQ ... NAU8824_REG_CLEAR_INT_REG:
	case NAU8824_REG_DRC_GAINL_ADC0 ... NAU8824_REG_DRC_GAINL_ADC3:
	case NAU8824_REG_DRC_GAIN_DAC_CH0 ... NAU8824_REG_DRC_GAIN_DAC_CH1:
	case NAU8824_REG_OTPDOUT_1 ... NAU8824_REG_OTPDOUT_2:
	case NAU8824_REG_I2C_DEVICE_ID ... NAU8824_REG_SAR_ADC_DATA_OUT:
	case NAU8824_REG_CHARGE_PUMP_INPUT:
		return true;
	default:
		return false;
	}
}
297
298static const char * const nau8824_companding[] = {
299 "Off", "NC", "u-law", "A-law" };
300
301static const struct soc_enum nau8824_companding_adc_enum =
302 SOC_ENUM_SINGLE(NAU8824_REG_PORT0_I2S_PCM_CTRL_1, 12,
303 ARRAY_SIZE(nau8824_companding), nau8824_companding);
304
305static const struct soc_enum nau8824_companding_dac_enum =
306 SOC_ENUM_SINGLE(NAU8824_REG_PORT0_I2S_PCM_CTRL_1, 14,
307 ARRAY_SIZE(nau8824_companding), nau8824_companding);
308
309static const char * const nau8824_adc_decimation[] = {
310 "32", "64", "128", "256" };
311
312static const struct soc_enum nau8824_adc_decimation_enum =
313 SOC_ENUM_SINGLE(NAU8824_REG_ADC_FILTER_CTRL, 0,
314 ARRAY_SIZE(nau8824_adc_decimation), nau8824_adc_decimation);
315
316static const char * const nau8824_dac_oversampl[] = {
317 "64", "256", "128", "", "32" };
318
319static const struct soc_enum nau8824_dac_oversampl_enum =
320 SOC_ENUM_SINGLE(NAU8824_REG_DAC_FILTER_CTRL_1, 0,
321 ARRAY_SIZE(nau8824_dac_oversampl), nau8824_dac_oversampl);
322
323static const char * const nau8824_input_channel[] = {
324 "Input CH0", "Input CH1", "Input CH2", "Input CH3" };
325
326static const struct soc_enum nau8824_adc_ch0_enum =
327 SOC_ENUM_SINGLE(NAU8824_REG_ADC_CH0_DGAIN_CTRL, 9,
328 ARRAY_SIZE(nau8824_input_channel), nau8824_input_channel);
329
330static const struct soc_enum nau8824_adc_ch1_enum =
331 SOC_ENUM_SINGLE(NAU8824_REG_ADC_CH1_DGAIN_CTRL, 9,
332 ARRAY_SIZE(nau8824_input_channel), nau8824_input_channel);
333
334static const struct soc_enum nau8824_adc_ch2_enum =
335 SOC_ENUM_SINGLE(NAU8824_REG_ADC_CH2_DGAIN_CTRL, 9,
336 ARRAY_SIZE(nau8824_input_channel), nau8824_input_channel);
337
338static const struct soc_enum nau8824_adc_ch3_enum =
339 SOC_ENUM_SINGLE(NAU8824_REG_ADC_CH3_DGAIN_CTRL, 9,
340 ARRAY_SIZE(nau8824_input_channel), nau8824_input_channel);
341
342static const char * const nau8824_tdm_slot[] = {
343 "Slot 0", "Slot 1", "Slot 2", "Slot 3" };
344
345static const struct soc_enum nau8824_dac_left_sel_enum =
346 SOC_ENUM_SINGLE(NAU8824_REG_TDM_CTRL, 6,
347 ARRAY_SIZE(nau8824_tdm_slot), nau8824_tdm_slot);
348
349static const struct soc_enum nau8824_dac_right_sel_enum =
350 SOC_ENUM_SINGLE(NAU8824_REG_TDM_CTRL, 4,
351 ARRAY_SIZE(nau8824_tdm_slot), nau8824_tdm_slot);
352
353static const DECLARE_TLV_DB_MINMAX_MUTE(spk_vol_tlv, 0, 2400);
354static const DECLARE_TLV_DB_MINMAX(hp_vol_tlv, -3000, 0);
355static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 200, 0);
356static const DECLARE_TLV_DB_SCALE(dmic_vol_tlv, -12800, 50, 0);
357
358static const struct snd_kcontrol_new nau8824_snd_controls[] = {
359 SOC_ENUM("ADC Companding", nau8824_companding_adc_enum),
360 SOC_ENUM("DAC Companding", nau8824_companding_dac_enum),
361
362 SOC_ENUM("ADC Decimation Rate", nau8824_adc_decimation_enum),
363 SOC_ENUM("DAC Oversampling Rate", nau8824_dac_oversampl_enum),
364
365 SOC_SINGLE_TLV("Speaker Right DACR Volume",
366 NAU8824_REG_CLASSD_GAIN_1, 8, 0x1f, 0, spk_vol_tlv),
367 SOC_SINGLE_TLV("Speaker Left DACL Volume",
368 NAU8824_REG_CLASSD_GAIN_2, 0, 0x1f, 0, spk_vol_tlv),
369 SOC_SINGLE_TLV("Speaker Left DACR Volume",
370 NAU8824_REG_CLASSD_GAIN_1, 0, 0x1f, 0, spk_vol_tlv),
371 SOC_SINGLE_TLV("Speaker Right DACL Volume",
372 NAU8824_REG_CLASSD_GAIN_2, 8, 0x1f, 0, spk_vol_tlv),
373
374 SOC_SINGLE_TLV("Headphone Right DACR Volume",
375 NAU8824_REG_ATT_PORT0, 8, 0x1f, 0, hp_vol_tlv),
376 SOC_SINGLE_TLV("Headphone Left DACL Volume",
377 NAU8824_REG_ATT_PORT0, 0, 0x1f, 0, hp_vol_tlv),
378 SOC_SINGLE_TLV("Headphone Right DACL Volume",
379 NAU8824_REG_ATT_PORT1, 8, 0x1f, 0, hp_vol_tlv),
380 SOC_SINGLE_TLV("Headphone Left DACR Volume",
381 NAU8824_REG_ATT_PORT1, 0, 0x1f, 0, hp_vol_tlv),
382
383 SOC_SINGLE_TLV("MIC1 Volume", NAU8824_REG_FEPGA_II,
384 NAU8824_FEPGA_GAINL_SFT, 0x12, 0, mic_vol_tlv),
385 SOC_SINGLE_TLV("MIC2 Volume", NAU8824_REG_FEPGA_II,
386 NAU8824_FEPGA_GAINR_SFT, 0x12, 0, mic_vol_tlv),
387
388 SOC_SINGLE_TLV("DMIC1 Volume", NAU8824_REG_ADC_CH0_DGAIN_CTRL,
389 0, 0x164, 0, dmic_vol_tlv),
390 SOC_SINGLE_TLV("DMIC2 Volume", NAU8824_REG_ADC_CH1_DGAIN_CTRL,
391 0, 0x164, 0, dmic_vol_tlv),
392 SOC_SINGLE_TLV("DMIC3 Volume", NAU8824_REG_ADC_CH2_DGAIN_CTRL,
393 0, 0x164, 0, dmic_vol_tlv),
394 SOC_SINGLE_TLV("DMIC4 Volume", NAU8824_REG_ADC_CH3_DGAIN_CTRL,
395 0, 0x164, 0, dmic_vol_tlv),
396
397 SOC_ENUM("ADC CH0 Select", nau8824_adc_ch0_enum),
398 SOC_ENUM("ADC CH1 Select", nau8824_adc_ch1_enum),
399 SOC_ENUM("ADC CH2 Select", nau8824_adc_ch2_enum),
400 SOC_ENUM("ADC CH3 Select", nau8824_adc_ch3_enum),
401
402 SOC_SINGLE("ADC CH0 TX Switch", NAU8824_REG_TDM_CTRL, 0, 1, 0),
403 SOC_SINGLE("ADC CH1 TX Switch", NAU8824_REG_TDM_CTRL, 1, 1, 0),
404 SOC_SINGLE("ADC CH2 TX Switch", NAU8824_REG_TDM_CTRL, 2, 1, 0),
405 SOC_SINGLE("ADC CH3 TX Switch", NAU8824_REG_TDM_CTRL, 3, 1, 0),
406
407 SOC_ENUM("DACL Channel Source", nau8824_dac_left_sel_enum),
408 SOC_ENUM("DACR Channel Source", nau8824_dac_right_sel_enum),
409
410 SOC_SINGLE("DACL LR Mix", NAU8824_REG_DAC_MUTE_CTRL, 0, 1, 0),
411 SOC_SINGLE("DACR LR Mix", NAU8824_REG_DAC_MUTE_CTRL, 1, 1, 0),
412};
413
414static int nau8824_output_dac_event(struct snd_soc_dapm_widget *w,
415 struct snd_kcontrol *kcontrol, int event)
416{
417 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
418 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
419
420 switch (event) {
421 case SND_SOC_DAPM_PRE_PMU:
422 /* Disables the TESTDAC to let DAC signal pass through. */
423 regmap_update_bits(nau8824->regmap, NAU8824_REG_ENABLE_LO,
424 NAU8824_TEST_DAC_EN, 0);
425 break;
426 case SND_SOC_DAPM_POST_PMD:
427 regmap_update_bits(nau8824->regmap, NAU8824_REG_ENABLE_LO,
428 NAU8824_TEST_DAC_EN, NAU8824_TEST_DAC_EN);
429 break;
430 default:
431 return -EINVAL;
432 }
433
434 return 0;
435}
436
437static int nau8824_spk_event(struct snd_soc_dapm_widget *w,
438 struct snd_kcontrol *kcontrol, int event)
439{
440 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
441 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
442
443 switch (event) {
444 case SND_SOC_DAPM_PRE_PMU:
445 regmap_update_bits(nau8824->regmap,
446 NAU8824_REG_ANALOG_CONTROL_2,
447 NAU8824_CLASSD_CLAMP_DIS, NAU8824_CLASSD_CLAMP_DIS);
448 break;
449 case SND_SOC_DAPM_POST_PMD:
450 regmap_update_bits(nau8824->regmap,
451 NAU8824_REG_ANALOG_CONTROL_2,
452 NAU8824_CLASSD_CLAMP_DIS, 0);
453 break;
454 default:
455 return -EINVAL;
456 }
457
458 return 0;
459}
460
461static int nau8824_pump_event(struct snd_soc_dapm_widget *w,
462 struct snd_kcontrol *kcontrol, int event)
463{
464 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
465 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
466
467 switch (event) {
468 case SND_SOC_DAPM_POST_PMU:
469 /* Prevent startup click by letting charge pump to ramp up */
470 msleep(10);
471 regmap_update_bits(nau8824->regmap,
472 NAU8824_REG_CHARGE_PUMP_CONTROL,
473 NAU8824_JAMNODCLOW, NAU8824_JAMNODCLOW);
474 break;
475 case SND_SOC_DAPM_PRE_PMD:
476 regmap_update_bits(nau8824->regmap,
477 NAU8824_REG_CHARGE_PUMP_CONTROL,
478 NAU8824_JAMNODCLOW, 0);
479 break;
480 default:
481 return -EINVAL;
482 }
483
484 return 0;
485}
486
/*
 * DAPM event for the system-clock supply widget.  Nothing is done on
 * power-up; on power-down the sysclk is switched to the internal clock
 * when a jack is inserted (clock is still needed for jack/button
 * detection) or disabled entirely otherwise.
 */
static int system_clock_control(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);

	if (SND_SOC_DAPM_EVENT_OFF(event)) {
		/* Set clock source to disable or internal clock before the
		 * playback or capture end. Codec needs clock for Jack
		 * detection and button press if jack inserted; otherwise,
		 * the clock should be closed.
		 */
		if (nau8824_is_jack_inserted(nau8824)) {
			nau8824_config_sysclk(nau8824,
				NAU8824_CLK_INTERNAL, 0);
		} else {
			nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
		}
	}
	return 0;
}
508
/* DAPM supply event (POST_PMU): program the DMIC clock divider from the
 * current sample rate cached in nau8824->fs.
 */
static int dmic_clock_control(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
	int src;

	/* The DMIC clock is gotten from system clock (256fs) divided by
	 * DMIC_SRC (1, 2, 4, 8, 16, 32). The clock has to be equal or
	 * less than 3.072 MHz.
	 */
	/* NOTE(review): the loop only tries src 0..4 (divide by 1..16),
	 * while the comment above also lists a divider of 32 - confirm
	 * against the datasheet whether src == 5 should be reachable.
	 */
	for (src = 0; src < 5; src++) {
		/* (0x1 << (8 - src)) * fs == 256 * fs / (1 << src) */
		if ((0x1 << (8 - src)) * nau8824->fs <= DMIC_CLK)
			break;
	}
	dev_dbg(nau8824->dev, "dmic src %d for mclk %d\n", src, nau8824->fs * 256);
	regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
		NAU8824_CLK_DMIC_SRC_MASK, (src << NAU8824_CLK_DMIC_SRC_SFT));

	return 0;
}
530
/* Per-channel DMIC enable switches (bits in the ENA_CTRL register). */
static const struct snd_kcontrol_new nau8824_adc_ch0_dmic =
	SOC_DAPM_SINGLE("Switch", NAU8824_REG_ENA_CTRL,
		NAU8824_ADC_CH0_DMIC_SFT, 1, 0);

static const struct snd_kcontrol_new nau8824_adc_ch1_dmic =
	SOC_DAPM_SINGLE("Switch", NAU8824_REG_ENA_CTRL,
		NAU8824_ADC_CH1_DMIC_SFT, 1, 0);

static const struct snd_kcontrol_new nau8824_adc_ch2_dmic =
	SOC_DAPM_SINGLE("Switch", NAU8824_REG_ENA_CTRL,
		NAU8824_ADC_CH2_DMIC_SFT, 1, 0);

static const struct snd_kcontrol_new nau8824_adc_ch3_dmic =
	SOC_DAPM_SINGLE("Switch", NAU8824_REG_ENA_CTRL,
		NAU8824_ADC_CH3_DMIC_SFT, 1, 0);

/* Front-end PGA input selection (MIC vs headset MIC) for the left and
 * right analog capture paths (FEPGA register).
 */
static const struct snd_kcontrol_new nau8824_adc_left_mixer[] = {
	SOC_DAPM_SINGLE("MIC Switch", NAU8824_REG_FEPGA,
		NAU8824_FEPGA_MODEL_MIC1_SFT, 1, 0),
	SOC_DAPM_SINGLE("HSMIC Switch", NAU8824_REG_FEPGA,
		NAU8824_FEPGA_MODEL_HSMIC_SFT, 1, 0),
};

static const struct snd_kcontrol_new nau8824_adc_right_mixer[] = {
	SOC_DAPM_SINGLE("MIC Switch", NAU8824_REG_FEPGA,
		NAU8824_FEPGA_MODER_MIC2_SFT, 1, 0),
	SOC_DAPM_SINGLE("HSMIC Switch", NAU8824_REG_FEPGA,
		NAU8824_FEPGA_MODER_HSMIC_SFT, 1, 0),
};

/* DAC-to-headphone crossbar: either DAC can feed either HP channel. */
static const struct snd_kcontrol_new nau8824_hp_left_mixer[] = {
	SOC_DAPM_SINGLE("DAC Right Switch", NAU8824_REG_ENABLE_LO,
		NAU8824_DACR_HPL_EN_SFT, 1, 0),
	SOC_DAPM_SINGLE("DAC Left Switch", NAU8824_REG_ENABLE_LO,
		NAU8824_DACL_HPL_EN_SFT, 1, 0),
};

static const struct snd_kcontrol_new nau8824_hp_right_mixer[] = {
	SOC_DAPM_SINGLE("DAC Left Switch", NAU8824_REG_ENABLE_LO,
		NAU8824_DACL_HPR_EN_SFT, 1, 0),
	SOC_DAPM_SINGLE("DAC Right Switch", NAU8824_REG_ENABLE_LO,
		NAU8824_DACR_HPR_EN_SFT, 1, 0),
};

/* Digital source selection muxes for the two DAC channels. */
static const char * const nau8824_dac_src[] = { "DACL", "DACR" };

static SOC_ENUM_SINGLE_DECL(
	nau8824_dacl_enum, NAU8824_REG_DAC_CH0_DGAIN_CTRL,
	NAU8824_DAC_CH0_SEL_SFT, nau8824_dac_src);

static SOC_ENUM_SINGLE_DECL(
	nau8824_dacr_enum, NAU8824_REG_DAC_CH1_DGAIN_CTRL,
	NAU8824_DAC_CH1_SEL_SFT, nau8824_dac_src);

static const struct snd_kcontrol_new nau8824_dacl_mux =
	SOC_DAPM_ENUM("DACL Source", nau8824_dacl_enum);

static const struct snd_kcontrol_new nau8824_dacr_mux =
	SOC_DAPM_ENUM("DACR Source", nau8824_dacr_enum);
590
591
/* DAPM widget graph: mic/DMIC inputs -> mixers -> ADCs -> AIFTX for
 * capture, and AIFRX -> DACs -> headphone / Class-D speaker drivers
 * for playback, plus the clock and bias supplies they depend on.
 */
static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
	SND_SOC_DAPM_SUPPLY("System Clock", SND_SOC_NOPM, 0, 0,
		system_clock_control, SND_SOC_DAPM_POST_PMD),

	/* Physical inputs */
	SND_SOC_DAPM_INPUT("HSMIC1"),
	SND_SOC_DAPM_INPUT("HSMIC2"),
	SND_SOC_DAPM_INPUT("MIC1"),
	SND_SOC_DAPM_INPUT("MIC2"),
	SND_SOC_DAPM_INPUT("DMIC1"),
	SND_SOC_DAPM_INPUT("DMIC2"),
	SND_SOC_DAPM_INPUT("DMIC3"),
	SND_SOC_DAPM_INPUT("DMIC4"),

	/* Capture supplies */
	SND_SOC_DAPM_SUPPLY("SAR", NAU8824_REG_SAR_ADC,
		NAU8824_SAR_ADC_EN_SFT, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("MICBIAS", NAU8824_REG_MIC_BIAS,
		NAU8824_MICBIAS_POWERUP_SFT, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("DMIC12 Power", NAU8824_REG_BIAS_ADJ,
		NAU8824_DMIC1_EN_SFT, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("DMIC34 Power", NAU8824_REG_BIAS_ADJ,
		NAU8824_DMIC2_EN_SFT, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("DMIC Clock", SND_SOC_NOPM, 0, 0,
		dmic_clock_control, SND_SOC_DAPM_POST_PMU),

	/* Digital microphone capture path */
	SND_SOC_DAPM_SWITCH("DMIC1 Enable", SND_SOC_NOPM,
		0, 0, &nau8824_adc_ch0_dmic),
	SND_SOC_DAPM_SWITCH("DMIC2 Enable", SND_SOC_NOPM,
		0, 0, &nau8824_adc_ch1_dmic),
	SND_SOC_DAPM_SWITCH("DMIC3 Enable", SND_SOC_NOPM,
		0, 0, &nau8824_adc_ch2_dmic),
	SND_SOC_DAPM_SWITCH("DMIC4 Enable", SND_SOC_NOPM,
		0, 0, &nau8824_adc_ch3_dmic),

	/* Analog capture path */
	SND_SOC_DAPM_MIXER("Left ADC", NAU8824_REG_POWER_UP_CONTROL,
		12, 0, nau8824_adc_left_mixer,
		ARRAY_SIZE(nau8824_adc_left_mixer)),
	SND_SOC_DAPM_MIXER("Right ADC", NAU8824_REG_POWER_UP_CONTROL,
		13, 0, nau8824_adc_right_mixer,
		ARRAY_SIZE(nau8824_adc_right_mixer)),

	SND_SOC_DAPM_ADC("ADCL", NULL, NAU8824_REG_ANALOG_ADC_2,
		NAU8824_ADCL_EN_SFT, 0),
	SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
		NAU8824_ADCR_EN_SFT, 0),

	/* Audio interface endpoints */
	SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),

	/* Playback path */
	SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
		NAU8824_DACL_EN_SFT, 0),
	SND_SOC_DAPM_SUPPLY("DACL Clock", NAU8824_REG_RDAC,
		NAU8824_DACL_CLK_SFT, 0, NULL, 0),
	SND_SOC_DAPM_DAC("DACR", NULL, NAU8824_REG_RDAC,
		NAU8824_DACR_EN_SFT, 0),
	SND_SOC_DAPM_SUPPLY("DACR Clock", NAU8824_REG_RDAC,
		NAU8824_DACR_CLK_SFT, 0, NULL, 0),

	SND_SOC_DAPM_MUX("DACL Mux", SND_SOC_NOPM, 0, 0, &nau8824_dacl_mux),
	SND_SOC_DAPM_MUX("DACR Mux", SND_SOC_NOPM, 0, 0, &nau8824_dacr_mux),

	SND_SOC_DAPM_PGA_S("Output DACL", 0, NAU8824_REG_CHARGE_PUMP_CONTROL,
		8, 1, nau8824_output_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_S("Output DACR", 0, NAU8824_REG_CHARGE_PUMP_CONTROL,
		9, 1, nau8824_output_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/* Speaker amplifier */
	SND_SOC_DAPM_PGA_S("ClassD", 0, NAU8824_REG_CLASSD_GAIN_1,
		NAU8824_CLASSD_EN_SFT, 0, nau8824_spk_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/* Headphone output chain */
	SND_SOC_DAPM_MIXER("Left Headphone", NAU8824_REG_CLASSG,
		NAU8824_CLASSG_LDAC_EN_SFT, 0, nau8824_hp_left_mixer,
		ARRAY_SIZE(nau8824_hp_left_mixer)),
	SND_SOC_DAPM_MIXER("Right Headphone", NAU8824_REG_CLASSG,
		NAU8824_CLASSG_RDAC_EN_SFT, 0, nau8824_hp_right_mixer,
		ARRAY_SIZE(nau8824_hp_right_mixer)),
	SND_SOC_DAPM_PGA_S("Charge Pump", 1, NAU8824_REG_CHARGE_PUMP_CONTROL,
		NAU8824_CHARGE_PUMP_EN_SFT, 0, nau8824_pump_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_PGA("Output Driver L",
		NAU8824_REG_POWER_UP_CONTROL, 3, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Output Driver R",
		NAU8824_REG_POWER_UP_CONTROL, 2, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Main Driver L",
		NAU8824_REG_POWER_UP_CONTROL, 1, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Main Driver R",
		NAU8824_REG_POWER_UP_CONTROL, 0, 0, NULL, 0),
	SND_SOC_DAPM_PGA("HP Boost Driver", NAU8824_REG_BOOST,
		NAU8824_HP_BOOST_DIS_SFT, 1, NULL, 0),
	SND_SOC_DAPM_PGA("Class G", NAU8824_REG_CLASSG,
		NAU8824_CLASSG_EN_SFT, 0, NULL, 0),

	/* Physical outputs */
	SND_SOC_DAPM_OUTPUT("SPKOUTL"),
	SND_SOC_DAPM_OUTPUT("SPKOUTR"),
	SND_SOC_DAPM_OUTPUT("HPOL"),
	SND_SOC_DAPM_OUTPUT("HPOR"),
};
690
/* DAPM routes connecting the widgets into the capture graph
 * (inputs -> ADCs/DMIC switches -> AIFTX) and the playback graph
 * (AIFRX -> DAC muxes -> speaker and headphone drivers).
 */
static const struct snd_soc_dapm_route nau8824_dapm_routes[] = {
	/* Digital microphone capture */
	{"DMIC1 Enable", "Switch", "DMIC1"},
	{"DMIC2 Enable", "Switch", "DMIC2"},
	{"DMIC3 Enable", "Switch", "DMIC3"},
	{"DMIC4 Enable", "Switch", "DMIC4"},

	{"DMIC1", NULL, "DMIC12 Power"},
	{"DMIC2", NULL, "DMIC12 Power"},
	{"DMIC3", NULL, "DMIC34 Power"},
	{"DMIC4", NULL, "DMIC34 Power"},
	{"DMIC12 Power", NULL, "DMIC Clock"},
	{"DMIC34 Power", NULL, "DMIC Clock"},

	/* Analog microphone capture */
	{"Left ADC", "MIC Switch", "MIC1"},
	{"Left ADC", "HSMIC Switch", "HSMIC1"},
	{"Right ADC", "MIC Switch", "MIC2"},
	{"Right ADC", "HSMIC Switch", "HSMIC2"},

	{"ADCL", NULL, "Left ADC"},
	{"ADCR", NULL, "Right ADC"},

	{"AIFTX", NULL, "MICBIAS"},
	{"AIFTX", NULL, "ADCL"},
	{"AIFTX", NULL, "ADCR"},
	{"AIFTX", NULL, "DMIC1 Enable"},
	{"AIFTX", NULL, "DMIC2 Enable"},
	{"AIFTX", NULL, "DMIC3 Enable"},
	{"AIFTX", NULL, "DMIC4 Enable"},

	/* Both audio interfaces require the system clock */
	{"AIFTX", NULL, "System Clock"},
	{"AIFRX", NULL, "System Clock"},

	/* Playback */
	{"DACL", NULL, "AIFRX"},
	{"DACL", NULL, "DACL Clock"},
	{"DACR", NULL, "AIFRX"},
	{"DACR", NULL, "DACR Clock"},

	{"DACL Mux", "DACL", "DACL"},
	{"DACL Mux", "DACR", "DACR"},
	{"DACR Mux", "DACL", "DACL"},
	{"DACR Mux", "DACR", "DACR"},

	{"Output DACL", NULL, "DACL Mux"},
	{"Output DACR", NULL, "DACR Mux"},

	/* Speaker path */
	{"ClassD", NULL, "Output DACL"},
	{"ClassD", NULL, "Output DACR"},

	/* Headphone path */
	{"Left Headphone", "DAC Left Switch", "Output DACL"},
	{"Left Headphone", "DAC Right Switch", "Output DACR"},
	{"Right Headphone", "DAC Left Switch", "Output DACL"},
	{"Right Headphone", "DAC Right Switch", "Output DACR"},

	{"Charge Pump", NULL, "Left Headphone"},
	{"Charge Pump", NULL, "Right Headphone"},
	{"Output Driver L", NULL, "Charge Pump"},
	{"Output Driver R", NULL, "Charge Pump"},
	{"Main Driver L", NULL, "Output Driver L"},
	{"Main Driver R", NULL, "Output Driver R"},
	{"Class G", NULL, "Main Driver L"},
	{"Class G", NULL, "Main Driver R"},
	{"HP Boost Driver", NULL, "Class G"},

	{"SPKOUTL", NULL, "ClassD"},
	{"SPKOUTR", NULL, "ClassD"},
	{"HPOL", NULL, "HP Boost Driver"},
	{"HPOR", NULL, "HP Boost Driver"},
};
759
760static bool nau8824_is_jack_inserted(struct nau8824 *nau8824)
761{
762 struct snd_soc_jack *jack = nau8824->jack;
763 bool insert = FALSE;
764
765 if (nau8824->irq && jack)
766 insert = jack->status & SND_JACK_HEADPHONE;
767
768 return insert;
769}
770
771static void nau8824_int_status_clear_all(struct regmap *regmap)
772{
773 int active_irq, clear_irq, i;
774
775 /* Reset the intrruption status from rightmost bit if the corres-
776 * ponding irq event occurs.
777 */
778 regmap_read(regmap, NAU8824_REG_IRQ, &active_irq);
779 for (i = 0; i < NAU8824_REG_DATA_LEN; i++) {
780 clear_irq = (0x1 << i);
781 if (active_irq & clear_irq)
782 regmap_write(regmap,
783 NAU8824_REG_CLEAR_INT_REG, clear_irq);
784 }
785}
786
/* Handle jack ejection: acknowledge pending interrupts, power down the
 * SAR ADC and mic bias, re-arm only the insertion interrupt and stop
 * the jack-detection clock.
 */
static void nau8824_eject_jack(struct nau8824 *nau8824)
{
	struct snd_soc_dapm_context *dapm = nau8824->dapm;
	struct regmap *regmap = nau8824->regmap;

	/* Clear all interruption status */
	nau8824_int_status_clear_all(regmap);

	/* Button/mic detection supplies are only needed while inserted */
	snd_soc_dapm_disable_pin(dapm, "SAR");
	snd_soc_dapm_disable_pin(dapm, "MICBIAS");
	snd_soc_dapm_sync(dapm);

	/* Enable the insertion interruption, disable the ejection
	 * interruption, and then bypass de-bounce circuit.
	 */
	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
		NAU8824_IRQ_KEY_RELEASE_DIS | NAU8824_IRQ_KEY_SHORT_PRESS_DIS |
		NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_INSERT_DIS,
		NAU8824_IRQ_KEY_RELEASE_DIS | NAU8824_IRQ_KEY_SHORT_PRESS_DIS |
		NAU8824_IRQ_EJECT_DIS);
	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
		NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
		NAU8824_IRQ_INSERT_EN);
	regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
		NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);

	/* Close clock for jack type detection at manual mode */
	nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
}
816
/* Deferred jack-type detection: power up mic bias and the SAR ADC,
 * sample the SAR ADC and classify the accessory - readings below
 * HEADSET_SARADC_THD mean headphones (no mic, so the mic supplies are
 * dropped again), otherwise a full headset. Reports the result to ALSA
 * and releases the semaphore taken before detection started.
 */
static void nau8824_jdet_work(struct work_struct *work)
{
	struct nau8824 *nau8824 = container_of(
		work, struct nau8824, jdet_work);
	struct snd_soc_dapm_context *dapm = nau8824->dapm;
	struct regmap *regmap = nau8824->regmap;
	int adc_value, event = 0, event_mask = 0;

	snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
	snd_soc_dapm_force_enable_pin(dapm, "SAR");
	snd_soc_dapm_sync(dapm);

	/* Let the supplies settle before sampling the SAR ADC */
	msleep(100);

	regmap_read(regmap, NAU8824_REG_SAR_ADC_DATA_OUT, &adc_value);
	adc_value = adc_value & NAU8824_SAR_ADC_DATA_MASK;
	dev_dbg(nau8824->dev, "SAR ADC data 0x%02x\n", adc_value);
	if (adc_value < HEADSET_SARADC_THD) {
		event |= SND_JACK_HEADPHONE;

		/* No microphone present - drop the detection supplies */
		snd_soc_dapm_disable_pin(dapm, "SAR");
		snd_soc_dapm_disable_pin(dapm, "MICBIAS");
		snd_soc_dapm_sync(dapm);
	} else {
		event |= SND_JACK_HEADSET;
	}
	event_mask |= SND_JACK_HEADSET;
	snd_soc_jack_report(nau8824->jack, event, event_mask);

	/* Pairs with the acquire done before jack detection started */
	nau8824_sema_release(nau8824);
}
848
/* Switch interrupt configuration to automatic mode after an insertion:
 * arm ejection and key interrupts, start the internal clock needed to
 * generate them, and take jack detection out of sleep mode.
 */
static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
{
	struct regmap *regmap = nau8824->regmap;

	/* Enable jack ejection, short key press and release interruption. */
	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
		NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
		NAU8824_IRQ_EJECT_EN);
	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
		NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_KEY_RELEASE_DIS |
		NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
	/* Enable internal VCO needed for interruptions */
	nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
	regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
		NAU8824_JD_SLEEP_MODE, 0);
}
865
866static int nau8824_button_decode(int value)
867{
868 int buttons = 0;
869
870 /* The chip supports up to 8 buttons, but ALSA defines
871 * only 6 buttons.
872 */
873 if (value & BIT(0))
874 buttons |= SND_JACK_BTN_0;
875 if (value & BIT(1))
876 buttons |= SND_JACK_BTN_1;
877 if (value & BIT(2))
878 buttons |= SND_JACK_BTN_2;
879 if (value & BIT(3))
880 buttons |= SND_JACK_BTN_3;
881 if (value & BIT(4))
882 buttons |= SND_JACK_BTN_4;
883 if (value & BIT(5))
884 buttons |= SND_JACK_BTN_5;
885
886 return buttons;
887}
888
/* Button events this driver can report through the jack. */
#define NAU8824_BUTTONS (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
	SND_JACK_BTN_2 | SND_JACK_BTN_3)

/* Threaded IRQ handler: dispatch ejection, key press/release and
 * insertion events, acknowledge the handled status bit and report any
 * resulting jack state change to ALSA.
 */
static irqreturn_t nau8824_interrupt(int irq, void *data)
{
	struct nau8824 *nau8824 = (struct nau8824 *)data;
	struct regmap *regmap = nau8824->regmap;
	int active_irq, clear_irq = 0, event = 0, event_mask = 0;

	if (regmap_read(regmap, NAU8824_REG_IRQ, &active_irq)) {
		dev_err(nau8824->dev, "failed to read irq status\n");
		return IRQ_NONE;
	}
	dev_dbg(nau8824->dev, "IRQ %x\n", active_irq);

	if (active_irq & NAU8824_JACK_EJECTION_DETECTED) {
		nau8824_eject_jack(nau8824);
		event_mask |= SND_JACK_HEADSET;
		clear_irq = NAU8824_JACK_EJECTION_DETECTED;
		/* release semaphore held after resume,
		 * and cancel jack detection
		 */
		nau8824_sema_release(nau8824);
		cancel_work_sync(&nau8824->jdet_work);
	} else if (active_irq & NAU8824_KEY_SHORT_PRESS_IRQ) {
		int key_status, button_pressed;

		regmap_read(regmap, NAU8824_REG_CLEAR_INT_REG,
			&key_status);

		/* lower 8 bits of the register are for pressed keys */
		button_pressed = nau8824_button_decode(key_status);

		event |= button_pressed;
		dev_dbg(nau8824->dev, "button %x pressed\n", event);
		event_mask |= NAU8824_BUTTONS;
		clear_irq = NAU8824_KEY_SHORT_PRESS_IRQ;
	} else if (active_irq & NAU8824_KEY_RELEASE_IRQ) {
		/* event stays 0: reporting no buttons releases them all */
		event_mask = NAU8824_BUTTONS;
		clear_irq = NAU8824_KEY_RELEASE_IRQ;
	} else if (active_irq & NAU8824_JACK_INSERTION_DETECTED) {
		/* Turn off insertion interruption at manual mode */
		regmap_update_bits(regmap,
			NAU8824_REG_INTERRUPT_SETTING,
			NAU8824_IRQ_INSERT_DIS,
			NAU8824_IRQ_INSERT_DIS);
		regmap_update_bits(regmap,
			NAU8824_REG_INTERRUPT_SETTING_1,
			NAU8824_IRQ_INSERT_EN, 0);
		/* detect microphone and jack type */
		cancel_work_sync(&nau8824->jdet_work);
		schedule_work(&nau8824->jdet_work);

		/* Enable interruption for jack type detection at audo
		 * mode which can detect microphone and jack type.
		 */
		nau8824_setup_auto_irq(nau8824);
	}

	if (!clear_irq)
		clear_irq = active_irq;
	/* clears the rightmost interruption */
	regmap_write(regmap, NAU8824_REG_CLEAR_INT_REG, clear_irq);

	if (event_mask)
		snd_soc_jack_report(nau8824->jack, event, event_mask);

	return IRQ_HANDLED;
}
958
/* Validate that the requested sample rate and oversampling selection
 * keep CLK_DAC/CLK_ADC within the supported range.
 *
 * @stream: SNDRV_PCM_STREAM_PLAYBACK selects the DAC table, anything
 *          else the ADC table.
 * @osr: index into osr_dac_sel/osr_adc_sel (register field value).
 *
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int nau8824_clock_check(struct nau8824 *nau8824,
	int stream, int rate, int osr)
{
	int osrate;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (osr >= ARRAY_SIZE(osr_dac_sel))
			return -EINVAL;
		osrate = osr_dac_sel[osr].osr;
	} else {
		if (osr >= ARRAY_SIZE(osr_adc_sel))
			return -EINVAL;
		osrate = osr_adc_sel[osr].osr;
	}

	/* NOTE(review): 'osr' is the table index, while the actual
	 * oversampling rate was extracted into 'osrate' above; the
	 * product 'rate * osrate' looks like the intended bound check -
	 * confirm whether 'rate * osr' is deliberate.
	 */
	if (!osrate || rate * osr > CLK_DA_AD_MAX) {
		dev_err(nau8824->dev, "exceed the maximum frequency of CLK_ADC or CLK_DAC\n");
		return -EINVAL;
	}

	return 0;
}
981
982static int nau8824_hw_params(struct snd_pcm_substream *substream,
983 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
984{
985 struct snd_soc_codec *codec = dai->codec;
986 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
987 unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
988
989 nau8824_sema_acquire(nau8824, HZ);
990
991 /* CLK_DAC or CLK_ADC = OSR * FS
992 * DAC or ADC clock frequency is defined as Over Sampling Rate (OSR)
993 * multiplied by the audio sample rate (Fs). Note that the OSR and Fs
994 * values must be selected such that the maximum frequency is less
995 * than 6.144 MHz.
996 */
997 nau8824->fs = params_rate(params);
998 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
999 regmap_read(nau8824->regmap,
1000 NAU8824_REG_DAC_FILTER_CTRL_1, &osr);
1001 osr &= NAU8824_DAC_OVERSAMPLE_MASK;
1002 if (nau8824_clock_check(nau8824, substream->stream,
1003 nau8824->fs, osr))
1004 return -EINVAL;
1005 regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
1006 NAU8824_CLK_DAC_SRC_MASK,
1007 osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT);
1008 } else {
1009 regmap_read(nau8824->regmap,
1010 NAU8824_REG_ADC_FILTER_CTRL, &osr);
1011 osr &= NAU8824_ADC_SYNC_DOWN_MASK;
1012 if (nau8824_clock_check(nau8824, substream->stream,
1013 nau8824->fs, osr))
1014 return -EINVAL;
1015 regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
1016 NAU8824_CLK_ADC_SRC_MASK,
1017 osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT);
1018 }
1019
1020 /* make BCLK and LRC divde configuration if the codec as master. */
1021 regmap_read(nau8824->regmap,
1022 NAU8824_REG_PORT0_I2S_PCM_CTRL_2, &ctrl_val);
1023 if (ctrl_val & NAU8824_I2S_MS_MASTER) {
1024 /* get the bclk and fs ratio */
1025 bclk_fs = snd_soc_params_to_bclk(params) / nau8824->fs;
1026 if (bclk_fs <= 32)
1027 bclk_div = 0x3;
1028 else if (bclk_fs <= 64)
1029 bclk_div = 0x2;
1030 else if (bclk_fs <= 128)
1031 bclk_div = 0x1;
1032 else if (bclk_fs <= 256)
1033 bclk_div = 0;
1034 else
1035 return -EINVAL;
1036 regmap_update_bits(nau8824->regmap,
1037 NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
1038 NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK,
1039 (bclk_div << NAU8824_I2S_LRC_DIV_SFT) | bclk_div);
1040 }
1041
1042 switch (params_width(params)) {
1043 case 16:
1044 val_len |= NAU8824_I2S_DL_16;
1045 break;
1046 case 20:
1047 val_len |= NAU8824_I2S_DL_20;
1048 break;
1049 case 24:
1050 val_len |= NAU8824_I2S_DL_24;
1051 break;
1052 case 32:
1053 val_len |= NAU8824_I2S_DL_32;
1054 break;
1055 default:
1056 return -EINVAL;
1057 }
1058
1059 regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
1060 NAU8824_I2S_DL_MASK, val_len);
1061
1062 nau8824_sema_release(nau8824);
1063
1064 return 0;
1065}
1066
1067static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
1068{
1069 struct snd_soc_codec *codec = dai->codec;
1070 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
1071 unsigned int ctrl1_val = 0, ctrl2_val = 0;
1072
1073 nau8824_sema_acquire(nau8824, HZ);
1074
1075 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
1076 case SND_SOC_DAIFMT_CBM_CFM:
1077 ctrl2_val |= NAU8824_I2S_MS_MASTER;
1078 break;
1079 case SND_SOC_DAIFMT_CBS_CFS:
1080 break;
1081 default:
1082 return -EINVAL;
1083 }
1084
1085 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
1086 case SND_SOC_DAIFMT_NB_NF:
1087 break;
1088 case SND_SOC_DAIFMT_IB_NF:
1089 ctrl1_val |= NAU8824_I2S_BP_INV;
1090 break;
1091 default:
1092 return -EINVAL;
1093 }
1094
1095 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
1096 case SND_SOC_DAIFMT_I2S:
1097 ctrl1_val |= NAU8824_I2S_DF_I2S;
1098 break;
1099 case SND_SOC_DAIFMT_LEFT_J:
1100 ctrl1_val |= NAU8824_I2S_DF_LEFT;
1101 break;
1102 case SND_SOC_DAIFMT_RIGHT_J:
1103 ctrl1_val |= NAU8824_I2S_DF_RIGTH;
1104 break;
1105 case SND_SOC_DAIFMT_DSP_A:
1106 ctrl1_val |= NAU8824_I2S_DF_PCM_AB;
1107 break;
1108 case SND_SOC_DAIFMT_DSP_B:
1109 ctrl1_val |= NAU8824_I2S_DF_PCM_AB;
1110 ctrl1_val |= NAU8824_I2S_PCMB_EN;
1111 break;
1112 default:
1113 return -EINVAL;
1114 }
1115
1116 regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
1117 NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK |
1118 NAU8824_I2S_PCMB_EN, ctrl1_val);
1119 regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
1120 NAU8824_I2S_MS_MASK, ctrl2_val);
1121
1122 nau8824_sema_release(nau8824);
1123
1124 return 0;
1125}
1126
/**
 * nau8824_calc_fll_param - Calculate FLL parameters.
 * @fll_in: external clock provided to codec.
 * @fs: sampling rate.
 * @fll_param: Pointer to structure of FLL parameters.
 *
 * Calculate FLL parameters to configure codec.
 *
 * Returns 0 for success or negative error code.
 */
static int nau8824_calc_fll_param(unsigned int fll_in,
	unsigned int fs, struct nau8824_fll *fll_param)
{
	u64 fvco, fvco_max;
	unsigned int fref, i, fvco_sel;

	/* Ensure the reference clock frequency (FREF) is <= 13.5MHz by dividing
	 * freq_in by 1, 2, 4, or 8 using FLL pre-scalar.
	 * FREF = freq_in / NAU8824_FLL_REF_DIV_MASK
	 */
	for (i = 0; i < ARRAY_SIZE(fll_pre_scalar); i++) {
		fref = fll_in / fll_pre_scalar[i].param;
		if (fref <= NAU_FREF_MAX)
			break;
	}
	/* Input clock too fast even with the largest pre-scaler */
	if (i == ARRAY_SIZE(fll_pre_scalar))
		return -EINVAL;
	fll_param->clk_ref_div = fll_pre_scalar[i].val;

	/* Choose the FLL ratio based on FREF */
	for (i = 0; i < ARRAY_SIZE(fll_ratio); i++) {
		if (fref >= fll_ratio[i].param)
			break;
	}
	if (i == ARRAY_SIZE(fll_ratio))
		return -EINVAL;
	fll_param->ratio = fll_ratio[i].val;

	/* Calculate the frequency of DCO (FDCO) given freq_out = 256 * Fs.
	 * FDCO must be within the 90MHz - 124MHz or the FFL cannot be
	 * guaranteed across the full range of operation.
	 * FDCO = freq_out * 2 * mclk_src_scaling
	 */
	fvco_max = 0;
	/* Sentinel: stays out of range if no scaling factor fits */
	fvco_sel = ARRAY_SIZE(mclk_src_scaling);
	for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
		fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
		if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
			fvco_max < fvco) {
			fvco_max = fvco;
			fvco_sel = i;
		}
	}
	if (ARRAY_SIZE(mclk_src_scaling) == fvco_sel)
		return -EINVAL;
	fll_param->mclk_src = mclk_src_scaling[fvco_sel].val;

	/* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
	 * input based on FDCO, FREF and FLL ratio.
	 */
	fvco = div_u64(fvco_max << 16, fref * fll_param->ratio);
	fll_param->fll_int = (fvco >> 16) & 0x3FF;
	fll_param->fll_frac = fvco & 0xFFFF;
	return 0;
}
1192
/* Write the pre-computed FLL parameters into the FLL register set.
 * The fractional path (SDM plus loop filter) is only enabled when the
 * calculation produced a non-zero fractional part.
 */
static void nau8824_fll_apply(struct regmap *regmap,
	struct nau8824_fll *fll_param)
{
	regmap_update_bits(regmap, NAU8824_REG_CLK_DIVIDER,
		NAU8824_CLK_SRC_MASK | NAU8824_CLK_MCLK_SRC_MASK,
		NAU8824_CLK_SRC_MCLK | fll_param->mclk_src);
	regmap_update_bits(regmap, NAU8824_REG_FLL1,
		NAU8824_FLL_RATIO_MASK, fll_param->ratio);
	/* FLL 16-bit fractional input */
	regmap_write(regmap, NAU8824_REG_FLL2, fll_param->fll_frac);
	/* FLL 10-bit integer input */
	regmap_update_bits(regmap, NAU8824_REG_FLL3,
		NAU8824_FLL_INTEGER_MASK, fll_param->fll_int);
	/* FLL pre-scaler */
	regmap_update_bits(regmap, NAU8824_REG_FLL4,
		NAU8824_FLL_REF_DIV_MASK,
		fll_param->clk_ref_div << NAU8824_FLL_REF_DIV_SFT);
	/* select divided VCO input */
	regmap_update_bits(regmap, NAU8824_REG_FLL5,
		NAU8824_FLL_CLK_SW_MASK, NAU8824_FLL_CLK_SW_REF);
	/* Disable free-running mode */
	regmap_update_bits(regmap,
		NAU8824_REG_FLL6, NAU8824_DCO_EN, 0);
	if (fll_param->fll_frac) {
		regmap_update_bits(regmap, NAU8824_REG_FLL5,
			NAU8824_FLL_PDB_DAC_EN | NAU8824_FLL_LOOP_FTR_EN |
			NAU8824_FLL_FTR_SW_MASK,
			NAU8824_FLL_PDB_DAC_EN | NAU8824_FLL_LOOP_FTR_EN |
			NAU8824_FLL_FTR_SW_FILTER);
		regmap_update_bits(regmap, NAU8824_REG_FLL6,
			NAU8824_SDM_EN, NAU8824_SDM_EN);
	} else {
		regmap_update_bits(regmap, NAU8824_REG_FLL5,
			NAU8824_FLL_PDB_DAC_EN | NAU8824_FLL_LOOP_FTR_EN |
			NAU8824_FLL_FTR_SW_MASK, NAU8824_FLL_FTR_SW_ACCU);
		regmap_update_bits(regmap,
			NAU8824_REG_FLL6, NAU8824_SDM_EN, 0);
	}
}
1232
/* freq_out must be 256*Fs in order to achieve the best performance */
static int nau8824_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
	struct nau8824_fll fll_param;
	int ret, fs;

	/* Recover Fs from the requested output clock (freq_out == 256 * Fs) */
	fs = freq_out / 256;
	ret = nau8824_calc_fll_param(freq_in, fs, &fll_param);
	if (ret < 0) {
		dev_err(nau8824->dev, "Unsupported input clock %d\n", freq_in);
		return ret;
	}
	dev_dbg(nau8824->dev, "mclk_src=%x ratio=%x fll_frac=%x fll_int=%x clk_ref_div=%x\n",
		fll_param.mclk_src, fll_param.ratio, fll_param.fll_frac,
		fll_param.fll_int, fll_param.clk_ref_div);

	nau8824_fll_apply(nau8824->regmap, &fll_param);
	/* presumably a settling delay before switching to the VCO -
	 * TODO confirm against the datasheet
	 */
	mdelay(2);
	regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
		NAU8824_CLK_SRC_MASK, NAU8824_CLK_SRC_VCO);

	return 0;
}
1258
/* Select the codec system clock source.
 *
 * Note: the MCLK and FLL_* cases serialize against stream configuration
 * with the driver semaphore, while the DIS and INTERNAL cases (used from
 * the jack detection paths) do not.
 *
 * Returns 0 on success, -EINVAL for an unknown clock id.
 */
static int nau8824_config_sysclk(struct nau8824 *nau8824,
	int clk_id, unsigned int freq)
{
	struct regmap *regmap = nau8824->regmap;

	switch (clk_id) {
	case NAU8824_CLK_DIS:
		/* Fall back to MCLK source with the internal DCO off */
		regmap_update_bits(regmap, NAU8824_REG_CLK_DIVIDER,
			NAU8824_CLK_SRC_MASK, NAU8824_CLK_SRC_MCLK);
		regmap_update_bits(regmap, NAU8824_REG_FLL6,
			NAU8824_DCO_EN, 0);
		break;

	case NAU8824_CLK_MCLK:
		nau8824_sema_acquire(nau8824, HZ);
		regmap_update_bits(regmap, NAU8824_REG_CLK_DIVIDER,
			NAU8824_CLK_SRC_MASK, NAU8824_CLK_SRC_MCLK);
		regmap_update_bits(regmap, NAU8824_REG_FLL6,
			NAU8824_DCO_EN, 0);
		nau8824_sema_release(nau8824);
		break;

	case NAU8824_CLK_INTERNAL:
		/* Free-running DCO supplies the clock (no MCLK needed) */
		regmap_update_bits(regmap, NAU8824_REG_FLL6,
			NAU8824_DCO_EN, NAU8824_DCO_EN);
		regmap_update_bits(regmap, NAU8824_REG_CLK_DIVIDER,
			NAU8824_CLK_SRC_MASK, NAU8824_CLK_SRC_VCO);
		break;

	case NAU8824_CLK_FLL_MCLK:
		nau8824_sema_acquire(nau8824, HZ);
		regmap_update_bits(regmap, NAU8824_REG_FLL3,
			NAU8824_FLL_CLK_SRC_MASK, NAU8824_FLL_CLK_SRC_MCLK);
		nau8824_sema_release(nau8824);
		break;

	case NAU8824_CLK_FLL_BLK:
		nau8824_sema_acquire(nau8824, HZ);
		regmap_update_bits(regmap, NAU8824_REG_FLL3,
			NAU8824_FLL_CLK_SRC_MASK, NAU8824_FLL_CLK_SRC_BLK);
		nau8824_sema_release(nau8824);
		break;

	case NAU8824_CLK_FLL_FS:
		nau8824_sema_acquire(nau8824, HZ);
		regmap_update_bits(regmap, NAU8824_REG_FLL3,
			NAU8824_FLL_CLK_SRC_MASK, NAU8824_FLL_CLK_SRC_FS);
		nau8824_sema_release(nau8824);
		break;

	default:
		dev_err(nau8824->dev, "Invalid clock id (%d)\n", clk_id);
		return -EINVAL;
	}

	dev_dbg(nau8824->dev, "Sysclk is %dHz and clock id is %d\n", freq,
		clk_id);

	return 0;
}
1319
/* ASoC set_sysclk callback: forward the clock id to the common helper
 * (the source and direction arguments are not used by this codec).
 */
static int nau8824_set_sysclk(struct snd_soc_codec *codec,
	int clk_id, int source, unsigned int freq, int dir)
{
	return nau8824_config_sysclk(snd_soc_codec_get_drvdata(codec),
		clk_id, freq);
}
1327
/* Re-establish the jack detection configuration after resume: stop the
 * system clock and, when an IRQ line is present, clear stale interrupt
 * status and re-arm insertion/ejection detection in sleep mode.
 */
static void nau8824_resume_setup(struct nau8824 *nau8824)
{
	nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
	if (nau8824->irq) {
		/* Clear all interruption status */
		nau8824_int_status_clear_all(nau8824->regmap);
		/* Enable jack detection at sleep mode, insertion detection,
		 * and ejection detection.
		 */
		regmap_update_bits(nau8824->regmap, NAU8824_REG_ENA_CTRL,
			NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
		regmap_update_bits(nau8824->regmap,
			NAU8824_REG_INTERRUPT_SETTING_1,
			NAU8824_IRQ_EJECT_EN | NAU8824_IRQ_INSERT_EN,
			NAU8824_IRQ_EJECT_EN | NAU8824_IRQ_INSERT_EN);
		regmap_update_bits(nau8824->regmap,
			NAU8824_REG_INTERRUPT_SETTING,
			NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_INSERT_DIS, 0);
	}
}
1348
/* DAPM bias-level callback: reconfigure jack detection on the
 * OFF -> STANDBY transition (i.e. when resuming) and mask all
 * interrupts when dropping to OFF.
 */
static int nau8824_set_bias_level(struct snd_soc_codec *codec,
	enum snd_soc_bias_level level)
{
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		break;

	case SND_SOC_BIAS_STANDBY:
		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
			/* Setup codec configuration after resume */
			nau8824_resume_setup(nau8824);
		}
		break;

	case SND_SOC_BIAS_OFF:
		/* Mask all interrupt sources and disable the enables so
		 * the device stays quiet while powered off.
		 */
		regmap_update_bits(nau8824->regmap,
			NAU8824_REG_INTERRUPT_SETTING, 0x3ff, 0x3ff);
		regmap_update_bits(nau8824->regmap,
			NAU8824_REG_INTERRUPT_SETTING_1,
			NAU8824_IRQ_EJECT_EN | NAU8824_IRQ_INSERT_EN, 0);
		break;
	}

	return 0;
}
1379
1380static int nau8824_codec_probe(struct snd_soc_codec *codec)
1381{
1382 struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
1383 struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
1384
1385 nau8824->dapm = dapm;
1386
1387 return 0;
1388}
1389
/* Codec suspend: quiesce the IRQ and force bias off before switching the
 * regmap to cache-only, so subsequent register writes are deferred and
 * replayed on resume (regcache_mark_dirty).
 */
static int __maybe_unused nau8824_suspend(struct snd_soc_codec *codec)
{
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);

	if (nau8824->irq) {
		disable_irq(nau8824->irq);
		snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
	}
	regcache_cache_only(nau8824->regmap, true);
	regcache_mark_dirty(nau8824->regmap);

	return 0;
}
1403
/* Codec resume: restore the cached registers, then re-enable the IRQ.
 * The semaphore is taken (non-blocking) so playback is postponed until
 * jack detection completes and releases it.
 */
static int __maybe_unused nau8824_resume(struct snd_soc_codec *codec)
{
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);

	regcache_cache_only(nau8824->regmap, false);
	regcache_sync(nau8824->regmap);
	if (nau8824->irq) {
		/* Hold semaphore to postpone playback happening
		 * until jack detection done.
		 */
		nau8824_sema_acquire(nau8824, 0);
		enable_irq(nau8824->irq);
	}

	return 0;
}
1420
/* ASoC codec driver description wiring the controls, DAPM widgets and
 * routes defined in this file into the sound core.
 */
static struct snd_soc_codec_driver nau8824_codec_driver = {
	.probe = nau8824_codec_probe,
	.set_sysclk = nau8824_set_sysclk,
	.set_pll = nau8824_set_pll,
	.set_bias_level = nau8824_set_bias_level,
	.suspend = nau8824_suspend,
	.resume = nau8824_resume,
	.suspend_bias_off = true,

	.component_driver = {
		.controls = nau8824_snd_controls,
		.num_controls = ARRAY_SIZE(nau8824_snd_controls),
		.dapm_widgets = nau8824_dapm_widgets,
		.num_dapm_widgets = ARRAY_SIZE(nau8824_dapm_widgets),
		.dapm_routes = nau8824_dapm_routes,
		.num_dapm_routes = ARRAY_SIZE(nau8824_dapm_routes),
	},
};
1439
/* DAI callbacks implemented by this driver. */
static const struct snd_soc_dai_ops nau8824_dai_ops = {
	.hw_params = nau8824_hw_params,
	.set_fmt = nau8824_set_fmt,
};

/* Supported sample rates and sample formats for both directions. */
#define NAU8824_RATES SNDRV_PCM_RATE_8000_192000
#define NAU8824_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
	| SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)

/* Single bidirectional stereo DAI exposed by the codec. */
static struct snd_soc_dai_driver nau8824_dai = {
	.name = NAU8824_CODEC_DAI,
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = NAU8824_RATES,
		.formats = NAU8824_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = NAU8824_RATES,
		.formats = NAU8824_FORMATS,
	},
	.ops = &nau8824_dai_ops,
};
1467
1468static const struct regmap_config nau8824_regmap_config = {
1469 .val_bits = NAU8824_REG_ADDR_LEN,
1470 .reg_bits = NAU8824_REG_DATA_LEN,
1471
1472 .max_register = NAU8824_REG_MAX,
1473 .readable_reg = nau8824_readable_reg,
1474 .writeable_reg = nau8824_writeable_reg,
1475 .volatile_reg = nau8824_volatile_reg,
1476
1477 .cache_type = REGCACHE_RBTREE,
1478 .reg_defaults = nau8824_reg_defaults,
1479 .num_reg_defaults = ARRAY_SIZE(nau8824_reg_defaults),
1480};
1481
/**
 * nau8824_enable_jack_detect - Specify a jack for event reporting
 *
 * @codec: codec to register the jack with
 * @jack: jack to use to report headset and button events on
 *
 * After this function has been called the headset insert/remove and button
 * events will be routed to the given jack. Jack can be null to stop
 * reporting.
 *
 * Returns 0 on success or the negative errno from the IRQ request.
 */
int nau8824_enable_jack_detect(struct snd_soc_codec *codec,
	struct snd_soc_jack *jack)
{
	struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
	int ret;

	nau8824->jack = jack;
	/* Initiate jack detection work queue */
	INIT_WORK(&nau8824->jdet_work, nau8824_jdet_work);
	/* Threaded, level-low IRQ; devm-managed so no explicit free. */
	ret = devm_request_threaded_irq(nau8824->dev, nau8824->irq, NULL,
		nau8824_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
		"nau8824", nau8824);
	if (ret) {
		dev_err(nau8824->dev, "Cannot request irq %d (%d)\n",
			nau8824->irq, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nau8824_enable_jack_detect);
1512
/*
 * Issue a software reset of the codec.
 *
 * NOTE(review): the RESET register is deliberately written twice in a
 * row; this mirrors the vendor init sequence — presumably the part
 * needs back-to-back writes to latch the reset. Confirm against the
 * datasheet before simplifying to a single write.
 */
static void nau8824_reset_chip(struct regmap *regmap)
{
	regmap_write(regmap, NAU8824_REG_RESET, 0x00);
	regmap_write(regmap, NAU8824_REG_RESET, 0x00);
}
1518
/*
 * Program the SAR ADC button-detection hardware from the values parsed
 * out of the firmware properties: tracking gain and timing, the number
 * of voltage levels with hysteresis and debounce, and the per-button
 * voltage thresholds (two 8-bit thresholds packed per 16-bit register).
 */
static void nau8824_setup_buttons(struct nau8824 *nau8824)
{
	struct regmap *regmap = nau8824->regmap;

	/* SAR ADC tracking gain and conversion timing. */
	regmap_update_bits(regmap, NAU8824_REG_SAR_ADC,
		NAU8824_SAR_TRACKING_GAIN_MASK,
		nau8824->sar_voltage << NAU8824_SAR_TRACKING_GAIN_SFT);
	regmap_update_bits(regmap, NAU8824_REG_SAR_ADC,
		NAU8824_SAR_COMPARE_TIME_MASK,
		nau8824->sar_compare_time << NAU8824_SAR_COMPARE_TIME_SFT);
	regmap_update_bits(regmap, NAU8824_REG_SAR_ADC,
		NAU8824_SAR_SAMPLING_TIME_MASK,
		nau8824->sar_sampling_time << NAU8824_SAR_SAMPLING_TIME_SFT);

	/* Level count (register holds N-1), hysteresis and key debounce. */
	regmap_update_bits(regmap, NAU8824_REG_VDET_COEFFICIENT,
		NAU8824_LEVELS_NR_MASK,
		(nau8824->sar_threshold_num - 1) << NAU8824_LEVELS_NR_SFT);
	regmap_update_bits(regmap, NAU8824_REG_VDET_COEFFICIENT,
		NAU8824_HYSTERESIS_MASK,
		nau8824->sar_hysteresis << NAU8824_HYSTERESIS_SFT);
	regmap_update_bits(regmap, NAU8824_REG_VDET_COEFFICIENT,
		NAU8824_SHORTKEY_DEBOUNCE_MASK,
		nau8824->key_debounce << NAU8824_SHORTKEY_DEBOUNCE_SFT);

	/* Thresholds: [0]..[7] packed high/low byte into four registers. */
	regmap_write(regmap, NAU8824_REG_VDET_THRESHOLD_1,
		(nau8824->sar_threshold[0] << 8) | nau8824->sar_threshold[1]);
	regmap_write(regmap, NAU8824_REG_VDET_THRESHOLD_2,
		(nau8824->sar_threshold[2] << 8) | nau8824->sar_threshold[3]);
	regmap_write(regmap, NAU8824_REG_VDET_THRESHOLD_3,
		(nau8824->sar_threshold[4] << 8) | nau8824->sar_threshold[5]);
	regmap_write(regmap, NAU8824_REG_VDET_THRESHOLD_4,
		(nau8824->sar_threshold[6] << 8) | nau8824->sar_threshold[7]);
}
1552
/*
 * One-time register initialization done at probe, after chip reset:
 * bias/VMID power-up, micbias, clock scaling and gating, channel
 * enables, output-path trim, and jack-detection polarity/debounce.
 * The write order follows the vendor bring-up sequence.
 */
static void nau8824_init_regs(struct nau8824 *nau8824)
{
	struct regmap *regmap = nau8824->regmap;

	/* Enable Bias/VMID/VMID Tieoff */
	regmap_update_bits(regmap, NAU8824_REG_BIAS_ADJ,
		NAU8824_VMID | NAU8824_VMID_SEL_MASK, NAU8824_VMID |
		(nau8824->vref_impedance << NAU8824_VMID_SEL_SFT));
	regmap_update_bits(regmap, NAU8824_REG_BOOST,
		NAU8824_GLOBAL_BIAS_EN, NAU8824_GLOBAL_BIAS_EN);
	/* Allow the bias/VMID rails to settle before configuring further. */
	mdelay(2);
	regmap_update_bits(regmap, NAU8824_REG_MIC_BIAS,
		NAU8824_MICBIAS_VOLTAGE_MASK, nau8824->micbias_voltage);
	/* Disable Boost Driver, Automatic Short circuit protection enable */
	regmap_update_bits(regmap, NAU8824_REG_BOOST,
		NAU8824_PRECHARGE_DIS | NAU8824_HP_BOOST_DIS |
		NAU8824_HP_BOOST_G_DIS | NAU8824_SHORT_SHUTDOWN_EN,
		NAU8824_PRECHARGE_DIS | NAU8824_HP_BOOST_DIS |
		NAU8824_HP_BOOST_G_DIS | NAU8824_SHORT_SHUTDOWN_EN);
	/* Scaling for ADC and DAC clock */
	regmap_update_bits(regmap, NAU8824_REG_CLK_DIVIDER,
		NAU8824_CLK_ADC_SRC_MASK | NAU8824_CLK_DAC_SRC_MASK,
		(0x1 << NAU8824_CLK_ADC_SRC_SFT) |
		(0x1 << NAU8824_CLK_DAC_SRC_SFT));
	/* Zero-crossing DAC gain changes to avoid audible clicks. */
	regmap_update_bits(regmap, NAU8824_REG_DAC_MUTE_CTRL,
		NAU8824_DAC_ZC_EN, NAU8824_DAC_ZC_EN);
	/* Enable both DAC channels and all four ADC channels. */
	regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
		NAU8824_DAC_CH1_EN | NAU8824_DAC_CH0_EN |
		NAU8824_ADC_CH0_EN | NAU8824_ADC_CH1_EN |
		NAU8824_ADC_CH2_EN | NAU8824_ADC_CH3_EN,
		NAU8824_DAC_CH1_EN | NAU8824_DAC_CH0_EN |
		NAU8824_ADC_CH0_EN | NAU8824_ADC_CH1_EN |
		NAU8824_ADC_CH2_EN | NAU8824_ADC_CH3_EN);
	/* Ungate the clocks for the paths enabled above plus SAR/DMIC. */
	regmap_update_bits(regmap, NAU8824_REG_CLK_GATING_ENA,
		NAU8824_CLK_ADC_CH23_EN | NAU8824_CLK_ADC_CH01_EN |
		NAU8824_CLK_DAC_CH1_EN | NAU8824_CLK_DAC_CH0_EN |
		NAU8824_CLK_I2S_EN | NAU8824_CLK_GAIN_EN |
		NAU8824_CLK_SAR_EN | NAU8824_CLK_DMIC_CH23_EN,
		NAU8824_CLK_ADC_CH23_EN | NAU8824_CLK_ADC_CH01_EN |
		NAU8824_CLK_DAC_CH1_EN | NAU8824_CLK_DAC_CH0_EN |
		NAU8824_CLK_I2S_EN | NAU8824_CLK_GAIN_EN |
		NAU8824_CLK_SAR_EN | NAU8824_CLK_DMIC_CH23_EN);
	/* Class G timer 64ms */
	regmap_update_bits(regmap, NAU8824_REG_CLASSG,
		NAU8824_CLASSG_TIMER_MASK,
		0x20 << NAU8824_CLASSG_TIMER_SFT);
	regmap_update_bits(regmap, NAU8824_REG_TRIM_SETTINGS,
		NAU8824_DRV_CURR_INC, NAU8824_DRV_CURR_INC);
	/* Disable DACR/L power */
	regmap_update_bits(regmap, NAU8824_REG_CHARGE_PUMP_CONTROL,
		NAU8824_SPKR_PULL_DOWN | NAU8824_SPKL_PULL_DOWN |
		NAU8824_POWER_DOWN_DACR | NAU8824_POWER_DOWN_DACL,
		NAU8824_SPKR_PULL_DOWN | NAU8824_SPKL_PULL_DOWN |
		NAU8824_POWER_DOWN_DACR | NAU8824_POWER_DOWN_DACL);
	/* Enable TESTDAC. This sets the analog DAC inputs to a '0' input
	 * signal to avoid any glitches due to power up transients in both
	 * the analog and digital DAC circuit.
	 */
	regmap_update_bits(regmap, NAU8824_REG_ENABLE_LO,
		NAU8824_TEST_DAC_EN, NAU8824_TEST_DAC_EN);
	/* Config L/R channel */
	regmap_update_bits(regmap, NAU8824_REG_DAC_CH0_DGAIN_CTRL,
		NAU8824_DAC_CH0_SEL_MASK, NAU8824_DAC_CH0_SEL_I2S0);
	regmap_update_bits(regmap, NAU8824_REG_DAC_CH1_DGAIN_CTRL,
		NAU8824_DAC_CH1_SEL_MASK, NAU8824_DAC_CH1_SEL_I2S1);
	regmap_update_bits(regmap, NAU8824_REG_ENABLE_LO,
		NAU8824_DACR_HPR_EN | NAU8824_DACL_HPL_EN,
		NAU8824_DACR_HPR_EN | NAU8824_DACL_HPL_EN);
	/* Default oversampling/decimations settings are unusable
	 * (audible hiss). Set it to something better.
	 */
	regmap_update_bits(regmap, NAU8824_REG_ADC_FILTER_CTRL,
		NAU8824_ADC_SYNC_DOWN_MASK, NAU8824_ADC_SYNC_DOWN_64);
	regmap_update_bits(regmap, NAU8824_REG_DAC_FILTER_CTRL_1,
		NAU8824_DAC_CICCLP_OFF | NAU8824_DAC_OVERSAMPLE_MASK,
		NAU8824_DAC_CICCLP_OFF | NAU8824_DAC_OVERSAMPLE_64);
	/* DAC clock delay 2ns, VREF */
	regmap_update_bits(regmap, NAU8824_REG_RDAC,
		NAU8824_RDAC_CLK_DELAY_MASK | NAU8824_RDAC_VREF_MASK,
		(0x2 << NAU8824_RDAC_CLK_DELAY_SFT) |
		(0x3 << NAU8824_RDAC_VREF_SFT));
	/* PGA input mode selection */
	regmap_update_bits(regmap, NAU8824_REG_FEPGA,
		NAU8824_FEPGA_MODEL_SHORT_EN | NAU8824_FEPGA_MODER_SHORT_EN,
		NAU8824_FEPGA_MODEL_SHORT_EN | NAU8824_FEPGA_MODER_SHORT_EN);
	/* Digital microphone control */
	regmap_update_bits(regmap, NAU8824_REG_ANALOG_CONTROL_1,
		NAU8824_DMIC_CLK_DRV_STRG | NAU8824_DMIC_CLK_SLEW_FAST,
		NAU8824_DMIC_CLK_DRV_STRG | NAU8824_DMIC_CLK_SLEW_FAST);
	regmap_update_bits(regmap, NAU8824_REG_JACK_DET_CTRL,
		NAU8824_JACK_LOGIC,
		/* jkdet_polarity - 1 is for active-low */
		nau8824->jkdet_polarity ? 0 : NAU8824_JACK_LOGIC);
	regmap_update_bits(regmap,
		NAU8824_REG_JACK_DET_CTRL, NAU8824_JACK_EJECT_DT_MASK,
		(nau8824->jack_eject_debounce << NAU8824_JACK_EJECT_DT_SFT));
	/* Only program button detection when thresholds were provided. */
	if (nau8824->sar_threshold_num)
		nau8824_setup_buttons(nau8824);
}
1652
1653static int nau8824_setup_irq(struct nau8824 *nau8824)
1654{
1655 /* Disable interruption before codec initiation done */
1656 regmap_update_bits(nau8824->regmap, NAU8824_REG_ENA_CTRL,
1657 NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
1658 regmap_update_bits(nau8824->regmap,
1659 NAU8824_REG_INTERRUPT_SETTING, 0x3ff, 0x3ff);
1660 regmap_update_bits(nau8824->regmap, NAU8824_REG_INTERRUPT_SETTING_1,
1661 NAU8824_IRQ_EJECT_EN | NAU8824_IRQ_INSERT_EN, 0);
1662
1663 return 0;
1664}
1665
/* Dump the parsed firmware properties at debug log level. */
static void nau8824_print_device_properties(struct nau8824 *nau8824)
{
	struct device *dev = nau8824->dev;
	int i;

	dev_dbg(dev, "jkdet-polarity: %d\n", nau8824->jkdet_polarity);
	dev_dbg(dev, "micbias-voltage: %d\n", nau8824->micbias_voltage);
	dev_dbg(dev, "vref-impedance: %d\n", nau8824->vref_impedance);

	dev_dbg(dev, "sar-threshold-num: %d\n", nau8824->sar_threshold_num);
	for (i = 0; i < nau8824->sar_threshold_num; i++)
		dev_dbg(dev, "sar-threshold[%d]=%x\n", i,
			nau8824->sar_threshold[i]);

	dev_dbg(dev, "sar-hysteresis: %d\n", nau8824->sar_hysteresis);
	dev_dbg(dev, "sar-voltage: %d\n", nau8824->sar_voltage);
	dev_dbg(dev, "sar-compare-time: %d\n", nau8824->sar_compare_time);
	dev_dbg(dev, "sar-sampling-time: %d\n", nau8824->sar_sampling_time);
	dev_dbg(dev, "short-key-debounce: %d\n", nau8824->key_debounce);
	dev_dbg(dev, "jack-eject-debounce: %d\n",
		nau8824->jack_eject_debounce);
}
1688
1689static int nau8824_read_device_properties(struct device *dev,
1690 struct nau8824 *nau8824) {
1691 int ret;
1692
1693 ret = device_property_read_u32(dev, "nuvoton,jkdet-polarity",
1694 &nau8824->jkdet_polarity);
1695 if (ret)
1696 nau8824->jkdet_polarity = 1;
1697 ret = device_property_read_u32(dev, "nuvoton,micbias-voltage",
1698 &nau8824->micbias_voltage);
1699 if (ret)
1700 nau8824->micbias_voltage = 6;
1701 ret = device_property_read_u32(dev, "nuvoton,vref-impedance",
1702 &nau8824->vref_impedance);
1703 if (ret)
1704 nau8824->vref_impedance = 2;
1705 ret = device_property_read_u32(dev, "nuvoton,sar-threshold-num",
1706 &nau8824->sar_threshold_num);
1707 if (ret)
1708 nau8824->sar_threshold_num = 4;
1709 ret = device_property_read_u32_array(dev, "nuvoton,sar-threshold",
1710 nau8824->sar_threshold, nau8824->sar_threshold_num);
1711 if (ret) {
1712 nau8824->sar_threshold[0] = 0x0a;
1713 nau8824->sar_threshold[1] = 0x14;
1714 nau8824->sar_threshold[2] = 0x26;
1715 nau8824->sar_threshold[3] = 0x73;
1716 }
1717 ret = device_property_read_u32(dev, "nuvoton,sar-hysteresis",
1718 &nau8824->sar_hysteresis);
1719 if (ret)
1720 nau8824->sar_hysteresis = 0;
1721 ret = device_property_read_u32(dev, "nuvoton,sar-voltage",
1722 &nau8824->sar_voltage);
1723 if (ret)
1724 nau8824->sar_voltage = 6;
1725 ret = device_property_read_u32(dev, "nuvoton,sar-compare-time",
1726 &nau8824->sar_compare_time);
1727 if (ret)
1728 nau8824->sar_compare_time = 1;
1729 ret = device_property_read_u32(dev, "nuvoton,sar-sampling-time",
1730 &nau8824->sar_sampling_time);
1731 if (ret)
1732 nau8824->sar_sampling_time = 1;
1733 ret = device_property_read_u32(dev, "nuvoton,short-key-debounce",
1734 &nau8824->key_debounce);
1735 if (ret)
1736 nau8824->key_debounce = 0;
1737 ret = device_property_read_u32(dev, "nuvoton,jack-eject-debounce",
1738 &nau8824->jack_eject_debounce);
1739 if (ret)
1740 nau8824->jack_eject_debounce = 1;
1741
1742 return 0;
1743}
1744
/*
 * I2C probe: allocate/locate driver state, set up the regmap, verify
 * the chip responds, run the hardware init sequence and register the
 * ASoC codec. Platform data, when provided, takes precedence over
 * firmware (DT/ACPI) properties.
 */
static int nau8824_i2c_probe(struct i2c_client *i2c,
	const struct i2c_device_id *id)
{
	struct device *dev = &i2c->dev;
	struct nau8824 *nau8824 = dev_get_platdata(dev);
	int ret, value;

	if (!nau8824) {
		nau8824 = devm_kzalloc(dev, sizeof(*nau8824), GFP_KERNEL);
		if (!nau8824)
			return -ENOMEM;
		ret = nau8824_read_device_properties(dev, nau8824);
		if (ret)
			return ret;
	}
	i2c_set_clientdata(i2c, nau8824);

	nau8824->regmap = devm_regmap_init_i2c(i2c, &nau8824_regmap_config);
	if (IS_ERR(nau8824->regmap))
		return PTR_ERR(nau8824->regmap);
	nau8824->dev = dev;
	nau8824->irq = i2c->irq;
	/* Binary semaphore guarding playback vs. jack detection. */
	sema_init(&nau8824->jd_sem, 1);

	nau8824_print_device_properties(nau8824);

	/*
	 * NOTE(review): 'value' is read but never compared against an
	 * expected ID — this only verifies the device answers on the bus,
	 * before the reset/init sequence below. Confirm whether an ID
	 * check was intended.
	 */
	ret = regmap_read(nau8824->regmap, NAU8824_REG_I2C_DEVICE_ID, &value);
	if (ret < 0) {
		dev_err(dev, "Failed to read device id from the NAU8824: %d\n",
			ret);
		return ret;
	}
	nau8824_reset_chip(nau8824->regmap);
	nau8824_init_regs(nau8824);

	/* IRQs stay masked until a machine driver enables jack detection. */
	if (i2c->irq)
		nau8824_setup_irq(nau8824);

	return snd_soc_register_codec(dev,
		&nau8824_codec_driver, &nau8824_dai, 1);
}
1786
1787
/* I2C remove: unregister the codec; devm frees everything else. */
static int nau8824_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}
1793
/* Plain I2C device-name match table. */
static const struct i2c_device_id nau8824_i2c_ids[] = {
	{ "nau8824", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, nau8824_i2c_ids);
1799
#ifdef CONFIG_OF
/* Devicetree match table (compatible = "nuvoton,nau8824"). */
static const struct of_device_id nau8824_of_ids[] = {
	{ .compatible = "nuvoton,nau8824", },
	{}
};
MODULE_DEVICE_TABLE(of, nau8824_of_ids);
#endif
1807
#ifdef CONFIG_ACPI
/* ACPI match table; "10508824" is presumably the vendor _HID — confirm. */
static const struct acpi_device_id nau8824_acpi_match[] = {
	{ "10508824", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, nau8824_acpi_match);
#endif
1815
/* I2C driver glue: binds via devicetree, ACPI or plain I2C id. */
static struct i2c_driver nau8824_i2c_driver = {
	.driver = {
		.name = "nau8824",
		.of_match_table = of_match_ptr(nau8824_of_ids),
		.acpi_match_table = ACPI_PTR(nau8824_acpi_match),
	},
	.probe = nau8824_i2c_probe,
	.remove = nau8824_i2c_remove,
	.id_table = nau8824_i2c_ids,
};
module_i2c_driver(nau8824_i2c_driver);


MODULE_DESCRIPTION("ASoC NAU88L24 driver");
MODULE_AUTHOR("John Hsu <KCHSU0@nuvoton.com>");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8824.h b/sound/soc/codecs/nau8824.h
new file mode 100644
index 000000000000..87ac9a382aed
--- /dev/null
+++ b/sound/soc/codecs/nau8824.h
@@ -0,0 +1,466 @@
1/*
2 * NAU88L24 ALSA SoC audio driver
3 *
4 * Copyright 2016 Nuvoton Technology Corp.
5 * Author: John Hsu <KCHSU0@nuvoton.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __NAU8824_H__
13#define __NAU8824_H__
14
15#define NAU8824_REG_RESET 0x00
16#define NAU8824_REG_ENA_CTRL 0x01
17#define NAU8824_REG_CLK_GATING_ENA 0x02
18#define NAU8824_REG_CLK_DIVIDER 0x03
19#define NAU8824_REG_FLL1 0x04
20#define NAU8824_REG_FLL2 0x05
21#define NAU8824_REG_FLL3 0x06
22#define NAU8824_REG_FLL4 0x07
23#define NAU8824_REG_FLL5 0x08
24#define NAU8824_REG_FLL6 0x09
25#define NAU8824_REG_FLL_VCO_RSV 0x0A
26#define NAU8824_REG_JACK_DET_CTRL 0x0D
27#define NAU8824_REG_INTERRUPT_SETTING_1 0x0F
28#define NAU8824_REG_IRQ 0x10
29#define NAU8824_REG_CLEAR_INT_REG 0x11
30#define NAU8824_REG_INTERRUPT_SETTING 0x12
31#define NAU8824_REG_SAR_ADC 0x13
32#define NAU8824_REG_VDET_COEFFICIENT 0x14
33#define NAU8824_REG_VDET_THRESHOLD_1 0x15
34#define NAU8824_REG_VDET_THRESHOLD_2 0x16
35#define NAU8824_REG_VDET_THRESHOLD_3 0x17
36#define NAU8824_REG_VDET_THRESHOLD_4 0x18
37#define NAU8824_REG_GPIO_SEL 0x1A
38#define NAU8824_REG_PORT0_I2S_PCM_CTRL_1 0x1C
39#define NAU8824_REG_PORT0_I2S_PCM_CTRL_2 0x1D
40#define NAU8824_REG_PORT0_LEFT_TIME_SLOT 0x1E
41#define NAU8824_REG_PORT0_RIGHT_TIME_SLOT 0x1F
42#define NAU8824_REG_TDM_CTRL 0x20
43#define NAU8824_REG_ADC_HPF_FILTER 0x23
44#define NAU8824_REG_ADC_FILTER_CTRL 0x24
45#define NAU8824_REG_DAC_FILTER_CTRL_1 0x25
46#define NAU8824_REG_DAC_FILTER_CTRL_2 0x26
47#define NAU8824_REG_NOTCH_FILTER_1 0x27
48#define NAU8824_REG_NOTCH_FILTER_2 0x28
49#define NAU8824_REG_EQ1_LOW 0x29
50#define NAU8824_REG_EQ2_EQ3 0x2A
51#define NAU8824_REG_EQ4_EQ5 0x2B
52#define NAU8824_REG_ADC_CH0_DGAIN_CTRL 0x2D
53#define NAU8824_REG_ADC_CH1_DGAIN_CTRL 0x2E
54#define NAU8824_REG_ADC_CH2_DGAIN_CTRL 0x2F
55#define NAU8824_REG_ADC_CH3_DGAIN_CTRL 0x30
56#define NAU8824_REG_DAC_MUTE_CTRL 0x31
57#define NAU8824_REG_DAC_CH0_DGAIN_CTRL 0x32
58#define NAU8824_REG_DAC_CH1_DGAIN_CTRL 0x33
59#define NAU8824_REG_ADC_TO_DAC_ST 0x34
60#define NAU8824_REG_DRC_KNEE_IP12_ADC_CH01 0x38
61#define NAU8824_REG_DRC_KNEE_IP34_ADC_CH01 0x39
62#define NAU8824_REG_DRC_SLOPE_ADC_CH01 0x3A
63#define NAU8824_REG_DRC_ATKDCY_ADC_CH01 0x3B
64#define NAU8824_REG_DRC_KNEE_IP12_ADC_CH23 0x3C
65#define NAU8824_REG_DRC_KNEE_IP34_ADC_CH23 0x3D
66#define NAU8824_REG_DRC_SLOPE_ADC_CH23 0x3E
67#define NAU8824_REG_DRC_ATKDCY_ADC_CH23 0x3F
68#define NAU8824_REG_DRC_GAINL_ADC0 0x40
69#define NAU8824_REG_DRC_GAINL_ADC1 0x41
70#define NAU8824_REG_DRC_GAINL_ADC2 0x42
71#define NAU8824_REG_DRC_GAINL_ADC3 0x43
72#define NAU8824_REG_DRC_KNEE_IP12_DAC 0x45
73#define NAU8824_REG_DRC_KNEE_IP34_DAC 0x46
74#define NAU8824_REG_DRC_SLOPE_DAC 0x47
75#define NAU8824_REG_DRC_ATKDCY_DAC 0x48
76#define NAU8824_REG_DRC_GAIN_DAC_CH0 0x49
77#define NAU8824_REG_DRC_GAIN_DAC_CH1 0x4A
78#define NAU8824_REG_MODE 0x4C
79#define NAU8824_REG_MODE1 0x4D
80#define NAU8824_REG_MODE2 0x4E
81#define NAU8824_REG_CLASSG 0x50
82#define NAU8824_REG_OTP_EFUSE 0x51
83#define NAU8824_REG_OTPDOUT_1 0x53
84#define NAU8824_REG_OTPDOUT_2 0x54
85#define NAU8824_REG_MISC_CTRL 0x55
86#define NAU8824_REG_I2C_TIMEOUT 0x56
87#define NAU8824_REG_TEST_MODE 0x57
88#define NAU8824_REG_I2C_DEVICE_ID 0x58
89#define NAU8824_REG_SAR_ADC_DATA_OUT 0x59
90#define NAU8824_REG_BIAS_ADJ 0x66
91#define NAU8824_REG_PGA_GAIN 0x67
92#define NAU8824_REG_TRIM_SETTINGS 0x68
93#define NAU8824_REG_ANALOG_CONTROL_1 0x69
94#define NAU8824_REG_ANALOG_CONTROL_2 0x6A
95#define NAU8824_REG_ENABLE_LO 0x6B
96#define NAU8824_REG_GAIN_LO 0x6C
97#define NAU8824_REG_CLASSD_GAIN_1 0x6D
98#define NAU8824_REG_CLASSD_GAIN_2 0x6E
99#define NAU8824_REG_ANALOG_ADC_1 0x71
100#define NAU8824_REG_ANALOG_ADC_2 0x72
101#define NAU8824_REG_RDAC 0x73
102#define NAU8824_REG_MIC_BIAS 0x74
103#define NAU8824_REG_HS_VOLUME_CONTROL 0x75
104#define NAU8824_REG_BOOST 0x76
105#define NAU8824_REG_FEPGA 0x77
106#define NAU8824_REG_FEPGA_II 0x78
107#define NAU8824_REG_FEPGA_SE 0x79
108#define NAU8824_REG_FEPGA_ATTENUATION 0x7A
109#define NAU8824_REG_ATT_PORT0 0x7B
110#define NAU8824_REG_ATT_PORT1 0x7C
111#define NAU8824_REG_POWER_UP_CONTROL 0x7F
112#define NAU8824_REG_CHARGE_PUMP_CONTROL 0x80
113#define NAU8824_REG_CHARGE_PUMP_INPUT 0x81
114#define NAU8824_REG_MAX NAU8824_REG_CHARGE_PUMP_INPUT
115/* 16-bit control register address, and 16-bits control register data */
116#define NAU8824_REG_ADDR_LEN 16
117#define NAU8824_REG_DATA_LEN 16
118
119
120/* ENA_CTRL (0x1) */
121#define NAU8824_DMIC_LCH_EDGE_CH23 (0x1 << 12)
122#define NAU8824_DMIC_LCH_EDGE_CH01 (0x1 << 11)
123#define NAU8824_JD_SLEEP_MODE (0x1 << 10)
124#define NAU8824_ADC_CH3_DMIC_SFT 9
125#define NAU8824_ADC_CH3_DMIC_EN (0x1 << NAU8824_ADC_CH3_DMIC_SFT)
126#define NAU8824_ADC_CH2_DMIC_SFT 8
127#define NAU8824_ADC_CH2_DMIC_EN (0x1 << NAU8824_ADC_CH2_DMIC_SFT)
128#define NAU8824_ADC_CH1_DMIC_SFT 7
129#define NAU8824_ADC_CH1_DMIC_EN (0x1 << NAU8824_ADC_CH1_DMIC_SFT)
130#define NAU8824_ADC_CH0_DMIC_SFT 6
131#define NAU8824_ADC_CH0_DMIC_EN (0x1 << NAU8824_ADC_CH0_DMIC_SFT)
132#define NAU8824_DAC_CH1_EN (0x1 << 5)
133#define NAU8824_DAC_CH0_EN (0x1 << 4)
134#define NAU8824_ADC_CH3_EN (0x1 << 3)
135#define NAU8824_ADC_CH2_EN (0x1 << 2)
136#define NAU8824_ADC_CH1_EN (0x1 << 1)
137#define NAU8824_ADC_CH0_EN 0x1
138
139/* CLK_GATING_ENA (0x02) */
140#define NAU8824_CLK_ADC_CH23_EN (0x1 << 15)
141#define NAU8824_CLK_ADC_CH01_EN (0x1 << 14)
142#define NAU8824_CLK_DAC_CH1_EN (0x1 << 13)
143#define NAU8824_CLK_DAC_CH0_EN (0x1 << 12)
144#define NAU8824_CLK_I2S_EN (0x1 << 7)
145#define NAU8824_CLK_GAIN_EN (0x1 << 5)
146#define NAU8824_CLK_SAR_EN (0x1 << 3)
147#define NAU8824_CLK_DMIC_CH23_EN (0x1 << 1)
148
149/* CLK_DIVIDER (0x3) */
150#define NAU8824_CLK_SRC_SFT 15
151#define NAU8824_CLK_SRC_MASK (1 << NAU8824_CLK_SRC_SFT)
152#define NAU8824_CLK_SRC_VCO (1 << NAU8824_CLK_SRC_SFT)
153#define NAU8824_CLK_SRC_MCLK (0 << NAU8824_CLK_SRC_SFT)
154#define NAU8824_CLK_MCLK_SRC_MASK (0xf << 0)
155#define NAU8824_CLK_DMIC_SRC_SFT 10
156#define NAU8824_CLK_DMIC_SRC_MASK (0x7 << NAU8824_CLK_DMIC_SRC_SFT)
157#define NAU8824_CLK_ADC_SRC_SFT 6
158#define NAU8824_CLK_ADC_SRC_MASK (0x3 << NAU8824_CLK_ADC_SRC_SFT)
159#define NAU8824_CLK_DAC_SRC_SFT 4
160#define NAU8824_CLK_DAC_SRC_MASK (0x3 << NAU8824_CLK_DAC_SRC_SFT)
161
162/* FLL1 (0x04) */
163#define NAU8824_FLL_RATIO_MASK (0x7f << 0)
164
165/* FLL3 (0x06) */
166#define NAU8824_FLL_INTEGER_MASK (0x3ff << 0)
167#define NAU8824_FLL_CLK_SRC_SFT 10
168#define NAU8824_FLL_CLK_SRC_MASK (0x3 << NAU8824_FLL_CLK_SRC_SFT)
169#define NAU8824_FLL_CLK_SRC_MCLK (0 << NAU8824_FLL_CLK_SRC_SFT)
170#define NAU8824_FLL_CLK_SRC_BLK (0x2 << NAU8824_FLL_CLK_SRC_SFT)
171#define NAU8824_FLL_CLK_SRC_FS (0x3 << NAU8824_FLL_CLK_SRC_SFT)
172
173/* FLL4 (0x07) */
174#define NAU8824_FLL_REF_DIV_SFT 10
175#define NAU8824_FLL_REF_DIV_MASK (0x3 << NAU8824_FLL_REF_DIV_SFT)
176
177/* FLL5 (0x08) */
178#define NAU8824_FLL_PDB_DAC_EN (0x1 << 15)
179#define NAU8824_FLL_LOOP_FTR_EN (0x1 << 14)
180#define NAU8824_FLL_CLK_SW_MASK (0x1 << 13)
181#define NAU8824_FLL_CLK_SW_N2 (0x1 << 13)
182#define NAU8824_FLL_CLK_SW_REF (0x0 << 13)
183#define NAU8824_FLL_FTR_SW_MASK (0x1 << 12)
184#define NAU8824_FLL_FTR_SW_ACCU (0x1 << 12)
185#define NAU8824_FLL_FTR_SW_FILTER (0x0 << 12)
186
187/* FLL6 (0x9) */
188#define NAU8824_DCO_EN (0x1 << 15)
189#define NAU8824_SDM_EN (0x1 << 14)
190
191/* IRQ (0x10) */
192#define NAU8824_SHORT_CIRCUIT_IRQ (0x1 << 7)
193#define NAU8824_IMPEDANCE_MEAS_IRQ (0x1 << 6)
194#define NAU8824_KEY_RELEASE_IRQ (0x1 << 5)
195#define NAU8824_KEY_LONG_PRESS_IRQ (0x1 << 4)
196#define NAU8824_KEY_SHORT_PRESS_IRQ (0x1 << 3)
197#define NAU8824_JACK_EJECTION_DETECTED (0x1 << 1)
198#define NAU8824_JACK_INSERTION_DETECTED 0x1
199
200/* JACK_DET_CTRL (0x0D) */
201#define NAU8824_JACK_EJECT_DT_SFT 2
202#define NAU8824_JACK_EJECT_DT_MASK (0x3 << NAU8824_JACK_EJECT_DT_SFT)
203#define NAU8824_JACK_LOGIC 0x1
204
205
206/* INTERRUPT_SETTING_1 (0x0F) */
207#define NAU8824_IRQ_EJECT_EN (0x1 << 9)
208#define NAU8824_IRQ_INSERT_EN (0x1 << 8)
209
210/* INTERRUPT_SETTING (0x12) */
211#define NAU8824_IRQ_KEY_RELEASE_DIS (0x1 << 5)
212#define NAU8824_IRQ_KEY_SHORT_PRESS_DIS (0x1 << 3)
213#define NAU8824_IRQ_EJECT_DIS (0x1 << 1)
214#define NAU8824_IRQ_INSERT_DIS 0x1
215
216/* SAR_ADC (0x13) */
217#define NAU8824_SAR_ADC_EN_SFT 12
218#define NAU8824_SAR_TRACKING_GAIN_SFT 8
219#define NAU8824_SAR_TRACKING_GAIN_MASK (0x7 << NAU8824_SAR_TRACKING_GAIN_SFT)
220#define NAU8824_SAR_COMPARE_TIME_SFT 2
221#define NAU8824_SAR_COMPARE_TIME_MASK (3 << 2)
222#define NAU8824_SAR_SAMPLING_TIME_SFT 0
223#define NAU8824_SAR_SAMPLING_TIME_MASK (3 << 0)
224
225/* VDET_COEFFICIENT (0x14) */
226#define NAU8824_SHORTKEY_DEBOUNCE_SFT 12
227#define NAU8824_SHORTKEY_DEBOUNCE_MASK (0x3 << NAU8824_SHORTKEY_DEBOUNCE_SFT)
228#define NAU8824_LEVELS_NR_SFT 8
229#define NAU8824_LEVELS_NR_MASK (0x7 << 8)
230#define NAU8824_HYSTERESIS_SFT 0
231#define NAU8824_HYSTERESIS_MASK 0xf
232
233/* PORT0_I2S_PCM_CTRL_1 (0x1C) */
234#define NAU8824_I2S_BP_SFT 7
235#define NAU8824_I2S_BP_MASK (1 << NAU8824_I2S_BP_SFT)
236#define NAU8824_I2S_BP_INV (1 << NAU8824_I2S_BP_SFT)
237#define NAU8824_I2S_PCMB_SFT 6
238#define NAU8824_I2S_PCMB_EN (1 << NAU8824_I2S_PCMB_SFT)
239#define NAU8824_I2S_DL_SFT 2
240#define NAU8824_I2S_DL_MASK (0x3 << NAU8824_I2S_DL_SFT)
241#define NAU8824_I2S_DL_16 (0 << NAU8824_I2S_DL_SFT)
242#define NAU8824_I2S_DL_20 (1 << NAU8824_I2S_DL_SFT)
243#define NAU8824_I2S_DL_24 (2 << NAU8824_I2S_DL_SFT)
244#define NAU8824_I2S_DL_32 (3 << NAU8824_I2S_DL_SFT)
245#define NAU8824_I2S_DF_MASK 0x3
246#define NAU8824_I2S_DF_RIGTH 0
247#define NAU8824_I2S_DF_LEFT 1
248#define NAU8824_I2S_DF_I2S 2
249#define NAU8824_I2S_DF_PCM_AB 3
250
251
252/* PORT0_I2S_PCM_CTRL_2 (0x1D) */
253#define NAU8824_I2S_LRC_DIV_SFT 12
254#define NAU8824_I2S_LRC_DIV_MASK (0x3 << NAU8824_I2S_LRC_DIV_SFT)
255#define NAU8824_I2S_MS_SFT 3
256#define NAU8824_I2S_MS_MASK (1 << NAU8824_I2S_MS_SFT)
257#define NAU8824_I2S_MS_MASTER (1 << NAU8824_I2S_MS_SFT)
258#define NAU8824_I2S_MS_SLAVE (0 << NAU8824_I2S_MS_SFT)
259#define NAU8824_I2S_BLK_DIV_MASK 0x7
260
261/* ADC_FILTER_CTRL (0x24) */
262#define NAU8824_ADC_SYNC_DOWN_MASK 0x3
263#define NAU8824_ADC_SYNC_DOWN_32 0
264#define NAU8824_ADC_SYNC_DOWN_64 1
265#define NAU8824_ADC_SYNC_DOWN_128 2
266#define NAU8824_ADC_SYNC_DOWN_256 3
267
268/* DAC_FILTER_CTRL_1 (0x25) */
269#define NAU8824_DAC_CICCLP_OFF (0x1 << 7)
270#define NAU8824_DAC_OVERSAMPLE_MASK 0x7
271#define NAU8824_DAC_OVERSAMPLE_64 0
272#define NAU8824_DAC_OVERSAMPLE_256 1
273#define NAU8824_DAC_OVERSAMPLE_128 2
274#define NAU8824_DAC_OVERSAMPLE_32 4
275
276/* DAC_MUTE_CTRL (0x31) */
277#define NAU8824_DAC_CH01_MIX 0x3
278#define NAU8824_DAC_ZC_EN (0x1 << 11)
279
280/* DAC_CH0_DGAIN_CTRL (0x32) */
281#define NAU8824_DAC_CH0_SEL_SFT 9
282#define NAU8824_DAC_CH0_SEL_MASK (0x1 << NAU8824_DAC_CH0_SEL_SFT)
283#define NAU8824_DAC_CH0_SEL_I2S0 (0x0 << NAU8824_DAC_CH0_SEL_SFT)
284#define NAU8824_DAC_CH0_SEL_I2S1 (0x1 << NAU8824_DAC_CH0_SEL_SFT)
285#define NAU8824_DAC_CH0_VOL_MASK 0x1ff
286
287/* DAC_CH1_DGAIN_CTRL (0x33) */
288#define NAU8824_DAC_CH1_SEL_SFT 9
289#define NAU8824_DAC_CH1_SEL_MASK (0x1 << NAU8824_DAC_CH1_SEL_SFT)
290#define NAU8824_DAC_CH1_SEL_I2S0 (0x0 << NAU8824_DAC_CH1_SEL_SFT)
291#define NAU8824_DAC_CH1_SEL_I2S1 (0x1 << NAU8824_DAC_CH1_SEL_SFT)
292#define NAU8824_DAC_CH1_VOL_MASK 0x1ff
293
294/* CLASSG (0x50) */
295#define NAU8824_CLASSG_TIMER_SFT 8
296#define NAU8824_CLASSG_TIMER_MASK (0x3f << NAU8824_CLASSG_TIMER_SFT)
297#define NAU8824_CLASSG_LDAC_EN_SFT 2
298#define NAU8824_CLASSG_RDAC_EN_SFT 1
299#define NAU8824_CLASSG_EN_SFT 0
300
301/* SAR_ADC_DATA_OUT (0x59) */
302#define NAU8824_SAR_ADC_DATA_MASK 0xff
303
304/* BIAS_ADJ (0x66) */
305#define NAU8824_VMID (1 << 6)
306#define NAU8824_VMID_SEL_SFT 4
307#define NAU8824_VMID_SEL_MASK (3 << NAU8824_VMID_SEL_SFT)
308#define NAU8824_DMIC2_EN_SFT 3
309#define NAU8824_DMIC1_EN_SFT 2
310
311/* TRIM_SETTINGS (0x68) */
312#define NAU8824_DRV_CURR_INC (1 << 15)
313
314/* ANALOG_CONTROL_1 (0x69) */
315#define NAU8824_DMIC_CLK_DRV_STRG (1 << 3)
316#define NAU8824_DMIC_CLK_SLEW_FAST (0x7)
317
318/* ANALOG_CONTROL_2 (0x6A) */
319#define NAU8824_CLASSD_CLAMP_DIS_SFT 3
320#define NAU8824_CLASSD_CLAMP_DIS (0x1 << NAU8824_CLASSD_CLAMP_DIS_SFT)
321
322/* ENABLE_LO (0x6B) */
323#define NAU8824_TEST_DAC_SFT 14
324#define NAU8824_TEST_DAC_EN (0x3 << NAU8824_TEST_DAC_SFT)
325#define NAU8824_DACL_HPR_EN_SFT 3
326#define NAU8824_DACL_HPR_EN (0x1 << NAU8824_DACL_HPR_EN_SFT)
327#define NAU8824_DACR_HPR_EN_SFT 2
328#define NAU8824_DACR_HPR_EN (0x1 << NAU8824_DACR_HPR_EN_SFT)
329#define NAU8824_DACR_HPL_EN_SFT 1
330#define NAU8824_DACR_HPL_EN (0x1 << NAU8824_DACR_HPL_EN_SFT)
331#define NAU8824_DACL_HPL_EN_SFT 0
332#define NAU8824_DACL_HPL_EN 0x1
333
334/* CLASSD_GAIN_1 (0x6D) */
335#define NAU8824_CLASSD_GAIN_1R_SFT 8
336#define NAU8824_CLASSD_GAIN_1R_MASK (0x1f << NAU8824_CLASSD_GAIN_1R_SFT)
337#define NAU8824_CLASSD_EN_SFT 7
338#define NAU8824_CLASSD_EN (0x1 << NAU8824_CLASSD_EN_SFT)
339#define NAU8824_CLASSD_GAIN_1L_MASK 0x1f
340
/* CLASSD_GAIN_2 (0x6E) */
#define NAU8824_CLASSD_GAIN_2R_SFT	8
/*
 * Fix copy-paste from the 0x6D block: build the 2R mask from the 2R
 * shift. Both shifts are 8, so the resulting value is unchanged.
 */
#define NAU8824_CLASSD_GAIN_2R_MASK	(0x1f << NAU8824_CLASSD_GAIN_2R_SFT)
/* NAU8824_CLASSD_EN{,_SFT} are already defined in the 0x6D block above. */
#define NAU8824_CLASSD_GAIN_2L_MASK	0x1f
347
348/* ANALOG_ADC_2 (0x72) */
349#define NAU8824_ADCR_EN_SFT 7
350#define NAU8824_ADCL_EN_SFT 6
351
352/* RDAC (0x73) */
353#define NAU8824_DACR_EN_SFT 13
354#define NAU8824_DACL_EN_SFT 12
355#define NAU8824_DACR_CLK_SFT 9
356#define NAU8824_DACL_CLK_SFT 8
357#define NAU8824_RDAC_CLK_DELAY_SFT 4
358#define NAU8824_RDAC_CLK_DELAY_MASK (0x7 << NAU8824_RDAC_CLK_DELAY_SFT)
359#define NAU8824_RDAC_VREF_SFT 2
360#define NAU8824_RDAC_VREF_MASK (0x3 << NAU8824_RDAC_VREF_SFT)
361
362/* MIC_BIAS (0x74) */
363#define NAU8824_MICBIAS_JKSLV (1 << 14)
364#define NAU8824_MICBIAS_JKR2 (1 << 12)
365#define NAU8824_MICBIAS_POWERUP_SFT 8
366#define NAU8824_MICBIAS_VOLTAGE_SFT 0
367#define NAU8824_MICBIAS_VOLTAGE_MASK 0x7
368
369/* BOOST (0x76) */
370#define NAU8824_PRECHARGE_DIS (0x1 << 13)
371#define NAU8824_GLOBAL_BIAS_EN (0x1 << 12)
372#define NAU8824_HP_BOOST_DIS_SFT 9
373#define NAU8824_HP_BOOST_DIS (0x1 << NAU8824_HP_BOOST_DIS_SFT)
374#define NAU8824_HP_BOOST_G_DIS_SFT 8
375#define NAU8824_HP_BOOST_G_DIS (0x1 << NAU8824_HP_BOOST_G_DIS_SFT)
376#define NAU8824_SHORT_SHUTDOWN_DIG_EN (1 << 7)
377#define NAU8824_SHORT_SHUTDOWN_EN (1 << 6)
378
379/* FEPGA (0x77) */
380#define NAU8824_FEPGA_MODER_SHORT_SFT 7
381#define NAU8824_FEPGA_MODER_SHORT_EN (0x1 << NAU8824_FEPGA_MODER_SHORT_SFT)
382#define NAU8824_FEPGA_MODER_MIC2_SFT 5
383#define NAU8824_FEPGA_MODER_MIC2_EN (0x1 << NAU8824_FEPGA_MODER_MIC2_SFT)
384#define NAU8824_FEPGA_MODER_HSMIC_SFT 4
385#define NAU8824_FEPGA_MODER_HSMIC_EN (0x1 << NAU8824_FEPGA_MODER_HSMIC_SFT)
386#define NAU8824_FEPGA_MODEL_SHORT_SFT 3
387#define NAU8824_FEPGA_MODEL_SHORT_EN (0x1 << NAU8824_FEPGA_MODEL_SHORT_SFT)
388#define NAU8824_FEPGA_MODEL_MIC1_SFT 1
389#define NAU8824_FEPGA_MODEL_MIC1_EN (0x1 << NAU8824_FEPGA_MODEL_MIC1_SFT)
390#define NAU8824_FEPGA_MODEL_HSMIC_SFT 0
391#define NAU8824_FEPGA_MODEL_HSMIC_EN (0x1 << NAU8824_FEPGA_MODEL_HSMIC_SFT)
392
393/* FEPGA_II (0x78) */
394#define NAU8824_FEPGA_GAINR_SFT 5
395#define NAU8824_FEPGA_GAINR_MASK (0x1f << NAU8824_FEPGA_GAINR_SFT)
396#define NAU8824_FEPGA_GAINL_SFT 0
397#define NAU8824_FEPGA_GAINL_MASK 0x1f
398
399/* CHARGE_PUMP_CONTROL (0x80) */
400#define NAU8824_JAMNODCLOW (0x1 << 15)
401#define NAU8824_SPKR_PULL_DOWN (0x1 << 13)
402#define NAU8824_SPKL_PULL_DOWN (0x1 << 12)
403#define NAU8824_POWER_DOWN_DACR (0x1 << 9)
404#define NAU8824_POWER_DOWN_DACL (0x1 << 8)
405#define NAU8824_CHARGE_PUMP_EN_SFT 5
406#define NAU8824_CHARGE_PUMP_EN (0x1 << NAU8824_CHARGE_PUMP_EN_SFT)
407
408
409#define NAU8824_CODEC_DAI "nau8824-hifi"
410
/* System Clock Source */
enum {
	NAU8824_CLK_DIS,	/* clock disabled */
	NAU8824_CLK_MCLK,	/* direct MCLK */
	NAU8824_CLK_INTERNAL,	/* internal clock -- see set_sysclk impl */
	NAU8824_CLK_FLL_MCLK,	/* FLL referenced to MCLK */
	NAU8824_CLK_FLL_BLK,	/* FLL referenced to bit clock */
	NAU8824_CLK_FLL_FS,	/* FLL referenced to frame sync */
};

/* Driver/runtime state for one NAU88L24 codec instance. */
struct nau8824 {
	struct device *dev;		/* underlying I2C device */
	struct regmap *regmap;		/* 16-bit-addr/16-bit-val register map */
	struct snd_soc_dapm_context *dapm;
	struct snd_soc_jack *jack;	/* jack registered for event reporting */
	struct work_struct jdet_work;	/* jack-detection deferred work */
	struct semaphore jd_sem;	/* holds off playback during jack detect */
	int fs;				/* sample rate -- presumably Hz; confirm */
	int irq;			/* interrupt line; 0 = no IRQ wired */
	/* The fields below are parsed from "nuvoton,*" firmware properties. */
	int micbias_voltage;
	int vref_impedance;
	int jkdet_polarity;		/* 1 = active-low (default) */
	int sar_threshold_num;		/* number of valid sar_threshold entries */
	int sar_threshold[8];		/* button voltage thresholds */
	int sar_hysteresis;
	int sar_voltage;
	int sar_compare_time;
	int sar_sampling_time;
	int key_debounce;
	int jack_eject_debounce;
};

/* Parameters describing one FLL configuration. */
struct nau8824_fll {
	int mclk_src;
	int ratio;
	int fll_frac;
	int fll_int;
	int clk_ref_div;
};

/* Lookup-table entry mapping a parameter value to a register value. */
struct nau8824_fll_attr {
	unsigned int param;
	unsigned int val;
};

/* Lookup-table entry mapping an oversampling rate to a clock source. */
struct nau8824_osr_attr {
	unsigned int osr;
	unsigned int clk_src;
};
460
461
/* Route headset insert/remove and button events to @jack (NULL stops
 * reporting); implemented in nau8824.c. */
int nau8824_enable_jack_detect(struct snd_soc_codec *codec,
	struct snd_soc_jack *jack);

#endif /* __NAU8824_H__ */
466
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index b281a46d769d..f91221b1ddf0 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -1084,13 +1084,28 @@ static int rt5514_parse_dt(struct rt5514_priv *rt5514, struct device *dev)
1084 return 0; 1084 return 0;
1085} 1085}
1086 1086
1087static __maybe_unused int rt5514_i2c_resume(struct device *dev)
1088{
1089 struct rt5514_priv *rt5514 = dev_get_drvdata(dev);
1090 unsigned int val;
1091
1092 /*
1093 * Add a bogus read to avoid rt5514's confusion after s2r in case it
1094 * saw glitches on the i2c lines and thought the other side sent a
1095 * start bit.
1096 */
1097 regmap_read(rt5514->regmap, RT5514_VENDOR_ID2, &val);
1098
1099 return 0;
1100}
1101
1087static int rt5514_i2c_probe(struct i2c_client *i2c, 1102static int rt5514_i2c_probe(struct i2c_client *i2c,
1088 const struct i2c_device_id *id) 1103 const struct i2c_device_id *id)
1089{ 1104{
1090 struct rt5514_platform_data *pdata = dev_get_platdata(&i2c->dev); 1105 struct rt5514_platform_data *pdata = dev_get_platdata(&i2c->dev);
1091 struct rt5514_priv *rt5514; 1106 struct rt5514_priv *rt5514;
1092 int ret; 1107 int ret;
1093 unsigned int val; 1108 unsigned int val = ~0;
1094 1109
1095 rt5514 = devm_kzalloc(&i2c->dev, sizeof(struct rt5514_priv), 1110 rt5514 = devm_kzalloc(&i2c->dev, sizeof(struct rt5514_priv),
1096 GFP_KERNEL); 1111 GFP_KERNEL);
@@ -1120,8 +1135,16 @@ static int rt5514_i2c_probe(struct i2c_client *i2c,
1120 return ret; 1135 return ret;
1121 } 1136 }
1122 1137
1123 regmap_read(rt5514->regmap, RT5514_VENDOR_ID2, &val); 1138 /*
1124 if (val != RT5514_DEVICE_ID) { 1139 * The rt5514 can get confused if the i2c lines glitch together, as
1140 * can happen at bootup as regulators are turned off and on. If it's
1141 * in this glitched state the first i2c read will fail, so we'll give
1142 * it one change to retry.
1143 */
1144 ret = regmap_read(rt5514->regmap, RT5514_VENDOR_ID2, &val);
1145 if (ret || val != RT5514_DEVICE_ID)
1146 ret = regmap_read(rt5514->regmap, RT5514_VENDOR_ID2, &val);
1147 if (ret || val != RT5514_DEVICE_ID) {
1125 dev_err(&i2c->dev, 1148 dev_err(&i2c->dev,
1126 "Device with ID register %x is not rt5514\n", val); 1149 "Device with ID register %x is not rt5514\n", val);
1127 return -ENODEV; 1150 return -ENODEV;
@@ -1149,10 +1172,15 @@ static int rt5514_i2c_remove(struct i2c_client *i2c)
1149 return 0; 1172 return 0;
1150} 1173}
1151 1174
1152struct i2c_driver rt5514_i2c_driver = { 1175static const struct dev_pm_ops rt5514_i2_pm_ops = {
1176 SET_SYSTEM_SLEEP_PM_OPS(NULL, rt5514_i2c_resume)
1177};
1178
1179static struct i2c_driver rt5514_i2c_driver = {
1153 .driver = { 1180 .driver = {
1154 .name = "rt5514", 1181 .name = "rt5514",
1155 .of_match_table = of_match_ptr(rt5514_of_match), 1182 .of_match_table = of_match_ptr(rt5514_of_match),
1183 .pm = &rt5514_i2_pm_ops,
1156 }, 1184 },
1157 .probe = rt5514_i2c_probe, 1185 .probe = rt5514_i2c_probe,
1158 .remove = rt5514_i2c_remove, 1186 .remove = rt5514_i2c_remove,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index e149f3ce5401..87844a45886a 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3542,6 +3542,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = {
3542}; 3542};
3543MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id); 3543MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
3544 3544
3545#ifdef CONFIG_OF
3546static const struct of_device_id rt5645_of_match[] = {
3547 { .compatible = "realtek,rt5645", },
3548 { .compatible = "realtek,rt5650", },
3549 { }
3550};
3551MODULE_DEVICE_TABLE(of, rt5645_of_match);
3552#endif
3553
3545#ifdef CONFIG_ACPI 3554#ifdef CONFIG_ACPI
3546static const struct acpi_device_id rt5645_acpi_match[] = { 3555static const struct acpi_device_id rt5645_acpi_match[] = {
3547 { "10EC5645", 0 }, 3556 { "10EC5645", 0 },
@@ -3912,6 +3921,7 @@ static void rt5645_i2c_shutdown(struct i2c_client *i2c)
3912static struct i2c_driver rt5645_i2c_driver = { 3921static struct i2c_driver rt5645_i2c_driver = {
3913 .driver = { 3922 .driver = {
3914 .name = "rt5645", 3923 .name = "rt5645",
3924 .of_match_table = of_match_ptr(rt5645_of_match),
3915 .acpi_match_table = ACPI_PTR(rt5645_acpi_match), 3925 .acpi_match_table = ACPI_PTR(rt5645_acpi_match),
3916 }, 3926 },
3917 .probe = rt5645_i2c_probe, 3927 .probe = rt5645_i2c_probe,
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 476135ec5726..8cd22307f5b6 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -1139,7 +1139,8 @@ static void rt5665_enable_push_button_irq(struct snd_soc_codec *codec,
1139 bool enable) 1139 bool enable)
1140{ 1140{
1141 if (enable) { 1141 if (enable) {
1142 snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, 0x000b); 1142 snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, 0x0003);
1143 snd_soc_update_bits(codec, RT5665_SAR_IL_CMD_9, 0x1, 0x1);
1143 snd_soc_write(codec, RT5665_IL_CMD_1, 0x0048); 1144 snd_soc_write(codec, RT5665_IL_CMD_1, 0x0048);
1144 snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2, 1145 snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
1145 RT5665_4BTN_IL_MASK | RT5665_4BTN_IL_RST_MASK, 1146 RT5665_4BTN_IL_MASK | RT5665_4BTN_IL_RST_MASK,
@@ -1192,10 +1193,13 @@ static int rt5665_headset_detect(struct snd_soc_codec *codec, int jack_insert)
1192 } 1193 }
1193 1194
1194 regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1, 1195 regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
1195 0x180, 0x180); 1196 0x1a0, 0x120);
1196 regmap_write(rt5665->regmap, RT5665_EJD_CTRL_3, 0x3424); 1197 regmap_write(rt5665->regmap, RT5665_EJD_CTRL_3, 0x3424);
1198 regmap_write(rt5665->regmap, RT5665_IL_CMD_1, 0x0048);
1197 regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0xa291); 1199 regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0xa291);
1198 1200
1201 usleep_range(10000, 15000);
1202
1199 rt5665->sar_adc_value = snd_soc_read(rt5665->codec, 1203 rt5665->sar_adc_value = snd_soc_read(rt5665->codec,
1200 RT5665_SAR_IL_CMD_4) & 0x7ff; 1204 RT5665_SAR_IL_CMD_4) & 0x7ff;
1201 1205
@@ -1256,8 +1260,8 @@ static void rt5665_jd_check_handler(struct work_struct *work)
1256 } 1260 }
1257} 1261}
1258 1262
1259int rt5665_set_jack_detect(struct snd_soc_codec *codec, 1263static int rt5665_set_jack_detect(struct snd_soc_codec *codec,
1260 struct snd_soc_jack *hs_jack) 1264 struct snd_soc_jack *hs_jack, void *data)
1261{ 1265{
1262 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec); 1266 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
1263 1267
@@ -1284,7 +1288,6 @@ int rt5665_set_jack_detect(struct snd_soc_codec *codec,
1284 1288
1285 return 0; 1289 return 0;
1286} 1290}
1287EXPORT_SYMBOL_GPL(rt5665_set_jack_detect);
1288 1291
1289static void rt5665_jack_detect_handler(struct work_struct *work) 1292static void rt5665_jack_detect_handler(struct work_struct *work)
1290{ 1293{
@@ -2600,6 +2603,55 @@ static int rt5655_set_verf(struct snd_soc_dapm_widget *w,
2600 return 0; 2603 return 0;
2601} 2604}
2602 2605
2606static int rt5665_i2s_pin_event(struct snd_soc_dapm_widget *w,
2607 struct snd_kcontrol *kcontrol, int event)
2608{
2609 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
2610 unsigned int val1, val2, mask1, mask2 = 0;
2611
2612 switch (w->shift) {
2613 case RT5665_PWR_I2S2_1_BIT:
2614 mask1 = RT5665_GP2_PIN_MASK | RT5665_GP3_PIN_MASK |
2615 RT5665_GP4_PIN_MASK | RT5665_GP5_PIN_MASK;
2616 val1 = RT5665_GP2_PIN_BCLK2 | RT5665_GP3_PIN_LRCK2 |
2617 RT5665_GP4_PIN_DACDAT2_1 | RT5665_GP5_PIN_ADCDAT2_1;
2618 break;
2619 case RT5665_PWR_I2S2_2_BIT:
2620 mask1 = RT5665_GP2_PIN_MASK | RT5665_GP3_PIN_MASK |
2621 RT5665_GP8_PIN_MASK;
2622 val1 = RT5665_GP2_PIN_BCLK2 | RT5665_GP3_PIN_LRCK2 |
2623 RT5665_GP8_PIN_DACDAT2_2;
2624 mask2 = RT5665_GP9_PIN_MASK;
2625 val2 = RT5665_GP9_PIN_ADCDAT2_2;
2626 break;
2627 case RT5665_PWR_I2S3_BIT:
2628 mask1 = RT5665_GP6_PIN_MASK | RT5665_GP7_PIN_MASK |
2629 RT5665_GP8_PIN_MASK;
2630 val1 = RT5665_GP6_PIN_BCLK3 | RT5665_GP7_PIN_LRCK3 |
2631 RT5665_GP8_PIN_DACDAT3;
2632 mask2 = RT5665_GP9_PIN_MASK;
2633 val2 = RT5665_GP9_PIN_ADCDAT3;
2634 break;
2635 }
2636 switch (event) {
2637 case SND_SOC_DAPM_PRE_PMU:
2638 snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1, mask1, val1);
2639 if (mask2)
2640 snd_soc_update_bits(codec, RT5665_GPIO_CTRL_2,
2641 mask2, val2);
2642 break;
2643 case SND_SOC_DAPM_POST_PMD:
2644 snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1, mask1, 0);
2645 if (mask2)
2646 snd_soc_update_bits(codec, RT5665_GPIO_CTRL_2,
2647 mask2, 0);
2648 break;
2649 default:
2650 return 0;
2651 }
2652
2653 return 0;
2654}
2603 2655
2604static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = { 2656static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = {
2605 SND_SOC_DAPM_SUPPLY("LDO2", RT5665_PWR_ANLG_3, RT5665_PWR_LDO2_BIT, 0, 2657 SND_SOC_DAPM_SUPPLY("LDO2", RT5665_PWR_ANLG_3, RT5665_PWR_LDO2_BIT, 0,
@@ -2852,11 +2904,14 @@ static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = {
2852 SND_SOC_DAPM_SUPPLY("I2S1_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_2_BIT, 2904 SND_SOC_DAPM_SUPPLY("I2S1_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_2_BIT,
2853 0, NULL, 0), 2905 0, NULL, 0),
2854 SND_SOC_DAPM_SUPPLY("I2S2_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_1_BIT, 2906 SND_SOC_DAPM_SUPPLY("I2S2_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_1_BIT,
2855 0, NULL, 0), 2907 0, rt5665_i2s_pin_event, SND_SOC_DAPM_PRE_PMU |
2908 SND_SOC_DAPM_POST_PMD),
2856 SND_SOC_DAPM_SUPPLY("I2S2_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_2_BIT, 2909 SND_SOC_DAPM_SUPPLY("I2S2_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_2_BIT,
2857 0, NULL, 0), 2910 0, rt5665_i2s_pin_event, SND_SOC_DAPM_PRE_PMU |
2911 SND_SOC_DAPM_POST_PMD),
2858 SND_SOC_DAPM_SUPPLY("I2S3", RT5665_PWR_DIG_1, RT5665_PWR_I2S3_BIT, 2912 SND_SOC_DAPM_SUPPLY("I2S3", RT5665_PWR_DIG_1, RT5665_PWR_I2S3_BIT,
2859 0, NULL, 0), 2913 0, rt5665_i2s_pin_event, SND_SOC_DAPM_PRE_PMU |
2914 SND_SOC_DAPM_POST_PMD),
2860 SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0), 2915 SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
2861 SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0), 2916 SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
2862 SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0), 2917 SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -3963,12 +4018,68 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3963 {"PDMR", NULL, "PDM R Playback"}, 4018 {"PDMR", NULL, "PDM R Playback"},
3964}; 4019};
3965 4020
4021static int rt5665_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
4022 unsigned int rx_mask, int slots, int slot_width)
4023{
4024 struct snd_soc_codec *codec = dai->codec;
4025 unsigned int val = 0;
4026
4027 if (rx_mask || tx_mask)
4028 val |= RT5665_I2S1_MODE_TDM;
4029
4030 switch (slots) {
4031 case 4:
4032 val |= RT5665_TDM_IN_CH_4;
4033 val |= RT5665_TDM_OUT_CH_4;
4034 break;
4035 case 6:
4036 val |= RT5665_TDM_IN_CH_6;
4037 val |= RT5665_TDM_OUT_CH_6;
4038 break;
4039 case 8:
4040 val |= RT5665_TDM_IN_CH_8;
4041 val |= RT5665_TDM_OUT_CH_8;
4042 break;
4043 case 2:
4044 break;
4045 default:
4046 return -EINVAL;
4047 }
4048
4049 switch (slot_width) {
4050 case 20:
4051 val |= RT5665_TDM_IN_LEN_20;
4052 val |= RT5665_TDM_OUT_LEN_20;
4053 break;
4054 case 24:
4055 val |= RT5665_TDM_IN_LEN_24;
4056 val |= RT5665_TDM_OUT_LEN_24;
4057 break;
4058 case 32:
4059 val |= RT5665_TDM_IN_LEN_32;
4060 val |= RT5665_TDM_OUT_LEN_32;
4061 break;
4062 case 16:
4063 break;
4064 default:
4065 return -EINVAL;
4066 }
4067
4068 snd_soc_update_bits(codec, RT5665_TDM_CTRL_1,
4069 RT5665_I2S1_MODE_MASK | RT5665_TDM_IN_CH_MASK |
4070 RT5665_TDM_OUT_CH_MASK | RT5665_TDM_IN_LEN_MASK |
4071 RT5665_TDM_OUT_LEN_MASK, val);
4072
4073 return 0;
4074}
4075
4076
3966static int rt5665_hw_params(struct snd_pcm_substream *substream, 4077static int rt5665_hw_params(struct snd_pcm_substream *substream,
3967 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) 4078 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
3968{ 4079{
3969 struct snd_soc_codec *codec = dai->codec; 4080 struct snd_soc_codec *codec = dai->codec;
3970 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec); 4081 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
3971 unsigned int val_len = 0, val_clk, mask_clk, val_bits = 0x0100; 4082 unsigned int val_len = 0, val_clk, reg_clk, mask_clk, val_bits = 0x0100;
3972 int pre_div, frame_size; 4083 int pre_div, frame_size;
3973 4084
3974 rt5665->lrck[dai->id] = params_rate(params); 4085 rt5665->lrck[dai->id] = params_rate(params);
@@ -4009,6 +4120,10 @@ static int rt5665_hw_params(struct snd_pcm_substream *substream,
4009 switch (dai->id) { 4120 switch (dai->id) {
4010 case RT5665_AIF1_1: 4121 case RT5665_AIF1_1:
4011 case RT5665_AIF1_2: 4122 case RT5665_AIF1_2:
4123 if (params_channels(params) > 2)
4124 rt5665_set_tdm_slot(dai, 0xf, 0xf,
4125 params_channels(params), params_width(params));
4126 reg_clk = RT5665_ADDA_CLK_1;
4012 mask_clk = RT5665_I2S_PD1_MASK; 4127 mask_clk = RT5665_I2S_PD1_MASK;
4013 val_clk = pre_div << RT5665_I2S_PD1_SFT; 4128 val_clk = pre_div << RT5665_I2S_PD1_SFT;
4014 snd_soc_update_bits(codec, RT5665_I2S1_SDP, 4129 snd_soc_update_bits(codec, RT5665_I2S1_SDP,
@@ -4016,12 +4131,14 @@ static int rt5665_hw_params(struct snd_pcm_substream *substream,
4016 break; 4131 break;
4017 case RT5665_AIF2_1: 4132 case RT5665_AIF2_1:
4018 case RT5665_AIF2_2: 4133 case RT5665_AIF2_2:
4134 reg_clk = RT5665_ADDA_CLK_2;
4019 mask_clk = RT5665_I2S_PD2_MASK; 4135 mask_clk = RT5665_I2S_PD2_MASK;
4020 val_clk = pre_div << RT5665_I2S_PD2_SFT; 4136 val_clk = pre_div << RT5665_I2S_PD2_SFT;
4021 snd_soc_update_bits(codec, RT5665_I2S2_SDP, 4137 snd_soc_update_bits(codec, RT5665_I2S2_SDP,
4022 RT5665_I2S_DL_MASK, val_len); 4138 RT5665_I2S_DL_MASK, val_len);
4023 break; 4139 break;
4024 case RT5665_AIF3: 4140 case RT5665_AIF3:
4141 reg_clk = RT5665_ADDA_CLK_2;
4025 mask_clk = RT5665_I2S_PD3_MASK; 4142 mask_clk = RT5665_I2S_PD3_MASK;
4026 val_clk = pre_div << RT5665_I2S_PD3_SFT; 4143 val_clk = pre_div << RT5665_I2S_PD3_SFT;
4027 snd_soc_update_bits(codec, RT5665_I2S3_SDP, 4144 snd_soc_update_bits(codec, RT5665_I2S3_SDP,
@@ -4032,7 +4149,7 @@ static int rt5665_hw_params(struct snd_pcm_substream *substream,
4032 return -EINVAL; 4149 return -EINVAL;
4033 } 4150 }
4034 4151
4035 snd_soc_update_bits(codec, RT5665_ADDA_CLK_1, mask_clk, val_clk); 4152 snd_soc_update_bits(codec, reg_clk, mask_clk, val_clk);
4036 snd_soc_update_bits(codec, RT5665_STO1_DAC_SIL_DET, 0x3700, val_bits); 4153 snd_soc_update_bits(codec, RT5665_STO1_DAC_SIL_DET, 0x3700, val_bits);
4037 4154
4038 switch (rt5665->lrck[dai->id]) { 4155 switch (rt5665->lrck[dai->id]) {
@@ -4125,10 +4242,9 @@ static int rt5665_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
4125 return 0; 4242 return 0;
4126} 4243}
4127 4244
4128static int rt5665_set_dai_sysclk(struct snd_soc_dai *dai, 4245static int rt5665_set_codec_sysclk(struct snd_soc_codec *codec, int clk_id,
4129 int clk_id, unsigned int freq, int dir) 4246 int source, unsigned int freq, int dir)
4130{ 4247{
4131 struct snd_soc_codec *codec = dai->codec;
4132 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec); 4248 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
4133 unsigned int reg_val = 0; 4249 unsigned int reg_val = 0;
4134 4250
@@ -4154,20 +4270,20 @@ static int rt5665_set_dai_sysclk(struct snd_soc_dai *dai,
4154 rt5665->sysclk = freq; 4270 rt5665->sysclk = freq;
4155 rt5665->sysclk_src = clk_id; 4271 rt5665->sysclk_src = clk_id;
4156 4272
4157 dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id); 4273 dev_dbg(codec->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
4158 4274
4159 return 0; 4275 return 0;
4160} 4276}
4161 4277
4162static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source, 4278static int rt5665_set_codec_pll(struct snd_soc_codec *codec, int pll_id,
4163 unsigned int freq_in, unsigned int freq_out) 4279 int source, unsigned int freq_in,
4280 unsigned int freq_out)
4164{ 4281{
4165 struct snd_soc_codec *codec = dai->codec;
4166 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec); 4282 struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
4167 struct rl6231_pll_code pll_code; 4283 struct rl6231_pll_code pll_code;
4168 int ret; 4284 int ret;
4169 4285
4170 if (Source == rt5665->pll_src && freq_in == rt5665->pll_in && 4286 if (source == rt5665->pll_src && freq_in == rt5665->pll_in &&
4171 freq_out == rt5665->pll_out) 4287 freq_out == rt5665->pll_out)
4172 return 0; 4288 return 0;
4173 4289
@@ -4181,7 +4297,7 @@ static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
4181 return 0; 4297 return 0;
4182 } 4298 }
4183 4299
4184 switch (Source) { 4300 switch (source) {
4185 case RT5665_PLL1_S_MCLK: 4301 case RT5665_PLL1_S_MCLK:
4186 snd_soc_update_bits(codec, RT5665_GLB_CLK, 4302 snd_soc_update_bits(codec, RT5665_GLB_CLK,
4187 RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_MCLK); 4303 RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_MCLK);
@@ -4199,7 +4315,7 @@ static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
4199 RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK3); 4315 RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK3);
4200 break; 4316 break;
4201 default: 4317 default:
4202 dev_err(codec->dev, "Unknown PLL Source %d\n", Source); 4318 dev_err(codec->dev, "Unknown PLL Source %d\n", source);
4203 return -EINVAL; 4319 return -EINVAL;
4204 } 4320 }
4205 4321
@@ -4221,62 +4337,7 @@ static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
4221 4337
4222 rt5665->pll_in = freq_in; 4338 rt5665->pll_in = freq_in;
4223 rt5665->pll_out = freq_out; 4339 rt5665->pll_out = freq_out;
4224 rt5665->pll_src = Source; 4340 rt5665->pll_src = source;
4225
4226 return 0;
4227}
4228
4229static int rt5665_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
4230 unsigned int rx_mask, int slots, int slot_width)
4231{
4232 struct snd_soc_codec *codec = dai->codec;
4233 unsigned int val = 0;
4234
4235 if (rx_mask || tx_mask)
4236 val |= RT5665_I2S1_MODE_TDM;
4237
4238 switch (slots) {
4239 case 4:
4240 val |= RT5665_TDM_IN_CH_4;
4241 val |= RT5665_TDM_OUT_CH_4;
4242 break;
4243 case 6:
4244 val |= RT5665_TDM_IN_CH_6;
4245 val |= RT5665_TDM_OUT_CH_6;
4246 break;
4247 case 8:
4248 val |= RT5665_TDM_IN_CH_8;
4249 val |= RT5665_TDM_OUT_CH_8;
4250 break;
4251 case 2:
4252 break;
4253 default:
4254 return -EINVAL;
4255 }
4256
4257 switch (slot_width) {
4258 case 20:
4259 val |= RT5665_TDM_IN_LEN_20;
4260 val |= RT5665_TDM_OUT_LEN_20;
4261 break;
4262 case 24:
4263 val |= RT5665_TDM_IN_LEN_24;
4264 val |= RT5665_TDM_OUT_LEN_24;
4265 break;
4266 case 32:
4267 val |= RT5665_TDM_IN_LEN_32;
4268 val |= RT5665_TDM_OUT_LEN_32;
4269 break;
4270 case 16:
4271 break;
4272 default:
4273 return -EINVAL;
4274 }
4275
4276 snd_soc_update_bits(codec, RT5665_TDM_CTRL_1,
4277 RT5665_I2S1_MODE_MASK | RT5665_TDM_IN_CH_MASK |
4278 RT5665_TDM_OUT_CH_MASK | RT5665_TDM_IN_LEN_MASK |
4279 RT5665_TDM_OUT_LEN_MASK, val);
4280 4341
4281 return 0; 4342 return 0;
4282} 4343}
@@ -4393,9 +4454,7 @@ static int rt5665_resume(struct snd_soc_codec *codec)
4393static const struct snd_soc_dai_ops rt5665_aif_dai_ops = { 4454static const struct snd_soc_dai_ops rt5665_aif_dai_ops = {
4394 .hw_params = rt5665_hw_params, 4455 .hw_params = rt5665_hw_params,
4395 .set_fmt = rt5665_set_dai_fmt, 4456 .set_fmt = rt5665_set_dai_fmt,
4396 .set_sysclk = rt5665_set_dai_sysclk,
4397 .set_tdm_slot = rt5665_set_tdm_slot, 4457 .set_tdm_slot = rt5665_set_tdm_slot,
4398 .set_pll = rt5665_set_dai_pll,
4399 .set_bclk_ratio = rt5665_set_bclk_ratio, 4458 .set_bclk_ratio = rt5665_set_bclk_ratio,
4400}; 4459};
4401 4460
@@ -4504,7 +4563,10 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5665 = {
4504 .num_dapm_widgets = ARRAY_SIZE(rt5665_dapm_widgets), 4563 .num_dapm_widgets = ARRAY_SIZE(rt5665_dapm_widgets),
4505 .dapm_routes = rt5665_dapm_routes, 4564 .dapm_routes = rt5665_dapm_routes,
4506 .num_dapm_routes = ARRAY_SIZE(rt5665_dapm_routes), 4565 .num_dapm_routes = ARRAY_SIZE(rt5665_dapm_routes),
4507 } 4566 },
4567 .set_sysclk = rt5665_set_codec_sysclk,
4568 .set_pll = rt5665_set_codec_pll,
4569 .set_jack = rt5665_set_jack_detect,
4508}; 4570};
4509 4571
4510 4572
@@ -4783,7 +4845,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c,
4783 4845
4784 regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0002); 4846 regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0002);
4785 regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1, 4847 regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
4786 0xf000 | RT5665_VREF_POW_MASK, 0xd000 | RT5665_VREF_POW_REG); 4848 0xf000 | RT5665_VREF_POW_MASK, 0xe000 | RT5665_VREF_POW_REG);
4787 /* Work around for pow_pump */ 4849 /* Work around for pow_pump */
4788 regmap_update_bits(rt5665->regmap, RT5665_STO1_DAC_SIL_DET, 4850 regmap_update_bits(rt5665->regmap, RT5665_STO1_DAC_SIL_DET,
4789 RT5665_DEB_STO_DAC_MASK, RT5665_DEB_80_MS); 4851 RT5665_DEB_STO_DAC_MASK, RT5665_DEB_80_MS);
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
index a30f5e6d0628..1db5c6a62a8e 100644
--- a/sound/soc/codecs/rt5665.h
+++ b/sound/soc/codecs/rt5665.h
@@ -1984,7 +1984,5 @@ enum {
1984 1984
1985int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec, 1985int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec,
1986 unsigned int filter_mask, unsigned int clk_src); 1986 unsigned int filter_mask, unsigned int clk_src);
1987int rt5665_set_jack_detect(struct snd_soc_codec *codec,
1988 struct snd_soc_jack *hs_jack);
1989 1987
1990#endif /* __RT5665_H__ */ 1988#endif /* __RT5665_H__ */
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 17d20b99f041..e27c5a4a0a15 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2835,6 +2835,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
2835 DMI_MATCH(DMI_PRODUCT_NAME, "Wyse 3040"), 2835 DMI_MATCH(DMI_PRODUCT_NAME, "Wyse 3040"),
2836 }, 2836 },
2837 }, 2837 },
2838 {
2839 .ident = "Lenovo Thinkpad Tablet 10",
2840 .matches = {
2841 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2842 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
2843 },
2844 },
2845 {
2846 .ident = "Lenovo Thinkpad Tablet 10",
2847 .matches = {
2848 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2849 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
2850 },
2851 },
2852 {
2853 .ident = "Lenovo Thinkpad Tablet 10",
2854 .matches = {
2855 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2856 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
2857 },
2858 },
2838 {} 2859 {}
2839}; 2860};
2840 2861
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index abc802a5a479..65ac4518ad06 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -5035,6 +5035,12 @@ static const struct i2c_device_id rt5677_i2c_id[] = {
5035}; 5035};
5036MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id); 5036MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
5037 5037
5038static const struct of_device_id rt5677_of_match[] = {
5039 { .compatible = "realtek,rt5677", },
5040 { }
5041};
5042MODULE_DEVICE_TABLE(of, rt5677_of_match);
5043
5038static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false }; 5044static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false };
5039static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false }; 5045static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false };
5040static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false }; 5046static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false };
@@ -5294,6 +5300,7 @@ static int rt5677_i2c_remove(struct i2c_client *i2c)
5294static struct i2c_driver rt5677_i2c_driver = { 5300static struct i2c_driver rt5677_i2c_driver = {
5295 .driver = { 5301 .driver = {
5296 .name = "rt5677", 5302 .name = "rt5677",
5303 .of_match_table = rt5677_of_match,
5297 }, 5304 },
5298 .probe = rt5677_i2c_probe, 5305 .probe = rt5677_i2c_probe,
5299 .remove = rt5677_i2c_remove, 5306 .remove = rt5677_i2c_remove,
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 1589325855bc..5a2702edeb77 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -99,6 +99,13 @@ enum sgtl5000_micbias_resistor {
99 SGTL5000_MICBIAS_8K = 8, 99 SGTL5000_MICBIAS_8K = 8,
100}; 100};
101 101
102enum {
103 I2S_LRCLK_STRENGTH_DISABLE,
104 I2S_LRCLK_STRENGTH_LOW,
105 I2S_LRCLK_STRENGTH_MEDIUM,
106 I2S_LRCLK_STRENGTH_HIGH,
107};
108
102/* sgtl5000 private structure in codec */ 109/* sgtl5000 private structure in codec */
103struct sgtl5000_priv { 110struct sgtl5000_priv {
104 int sysclk; /* sysclk rate */ 111 int sysclk; /* sysclk rate */
@@ -111,6 +118,7 @@ struct sgtl5000_priv {
111 int revision; 118 int revision;
112 u8 micbias_resistor; 119 u8 micbias_resistor;
113 u8 micbias_voltage; 120 u8 micbias_voltage;
121 u8 lrclk_strength;
114}; 122};
115 123
116/* 124/*
@@ -1089,6 +1097,7 @@ static int sgtl5000_enable_regulators(struct i2c_client *client)
1089static int sgtl5000_probe(struct snd_soc_codec *codec) 1097static int sgtl5000_probe(struct snd_soc_codec *codec)
1090{ 1098{
1091 int ret; 1099 int ret;
1100 u16 reg;
1092 struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec); 1101 struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
1093 1102
1094 /* power up sgtl5000 */ 1103 /* power up sgtl5000 */
@@ -1118,7 +1127,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
1118 SGTL5000_DAC_MUTE_RIGHT | 1127 SGTL5000_DAC_MUTE_RIGHT |
1119 SGTL5000_DAC_MUTE_LEFT); 1128 SGTL5000_DAC_MUTE_LEFT);
1120 1129
1121 snd_soc_write(codec, SGTL5000_CHIP_PAD_STRENGTH, 0x015f); 1130 reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
1131 snd_soc_write(codec, SGTL5000_CHIP_PAD_STRENGTH, reg);
1122 1132
1123 snd_soc_write(codec, SGTL5000_CHIP_ANA_CTRL, 1133 snd_soc_write(codec, SGTL5000_CHIP_ANA_CTRL,
1124 SGTL5000_HP_ZCD_EN | 1134 SGTL5000_HP_ZCD_EN |
@@ -1347,6 +1357,13 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
1347 } 1357 }
1348 } 1358 }
1349 1359
1360 sgtl5000->lrclk_strength = I2S_LRCLK_STRENGTH_LOW;
1361 if (!of_property_read_u32(np, "lrclk-strength", &value)) {
1362 if (value > I2S_LRCLK_STRENGTH_HIGH)
1363 value = I2S_LRCLK_STRENGTH_LOW;
1364 sgtl5000->lrclk_strength = value;
1365 }
1366
1350 /* Ensure sgtl5000 will start with sane register values */ 1367 /* Ensure sgtl5000 will start with sane register values */
1351 sgtl5000_fill_defaults(client); 1368 sgtl5000_fill_defaults(client);
1352 1369
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 2bb5a11c9ba1..a622623e8558 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -485,6 +485,14 @@ static const struct i2c_device_id ssm4567_i2c_ids[] = {
485}; 485};
486MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids); 486MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
487 487
488#ifdef CONFIG_OF
489static const struct of_device_id ssm4567_of_match[] = {
490 { .compatible = "adi,ssm4567", },
491 { }
492};
493MODULE_DEVICE_TABLE(of, ssm4567_of_match);
494#endif
495
488#ifdef CONFIG_ACPI 496#ifdef CONFIG_ACPI
489 497
490static const struct acpi_device_id ssm4567_acpi_match[] = { 498static const struct acpi_device_id ssm4567_acpi_match[] = {
@@ -498,6 +506,7 @@ MODULE_DEVICE_TABLE(acpi, ssm4567_acpi_match);
498static struct i2c_driver ssm4567_driver = { 506static struct i2c_driver ssm4567_driver = {
499 .driver = { 507 .driver = {
500 .name = "ssm4567", 508 .name = "ssm4567",
509 .of_match_table = of_match_ptr(ssm4567_of_match),
501 .acpi_match_table = ACPI_PTR(ssm4567_acpi_match), 510 .acpi_match_table = ACPI_PTR(ssm4567_acpi_match),
502 }, 511 },
503 .probe = ssm4567_i2c_probe, 512 .probe = ssm4567_i2c_probe,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index d4b384e4b266..660734359bf3 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -375,9 +375,16 @@ static const struct i2c_device_id sta529_i2c_id[] = {
375}; 375};
376MODULE_DEVICE_TABLE(i2c, sta529_i2c_id); 376MODULE_DEVICE_TABLE(i2c, sta529_i2c_id);
377 377
378static const struct of_device_id sta529_of_match[] = {
379 { .compatible = "st,sta529", },
380 { }
381};
382MODULE_DEVICE_TABLE(of, sta529_of_match);
383
378static struct i2c_driver sta529_i2c_driver = { 384static struct i2c_driver sta529_i2c_driver = {
379 .driver = { 385 .driver = {
380 .name = "sta529", 386 .name = "sta529",
387 .of_match_table = sta529_of_match,
381 }, 388 },
382 .probe = sta529_i2c_probe, 389 .probe = sta529_i2c_probe,
383 .remove = sta529_i2c_remove, 390 .remove = sta529_i2c_remove,
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index baf455e8c2f7..8840f72f3c4a 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -611,7 +611,7 @@ probe_fail:
611 611
612 regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies), 612 regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies),
613 tas2552->supplies); 613 tas2552->supplies);
614 return -EIO; 614 return ret;
615} 615}
616 616
617static int tas2552_codec_remove(struct snd_soc_codec *codec) 617static int tas2552_codec_remove(struct snd_soc_codec *codec)
@@ -637,7 +637,7 @@ static int tas2552_suspend(struct snd_soc_codec *codec)
637 if (ret != 0) 637 if (ret != 0)
638 dev_err(codec->dev, "Failed to disable supplies: %d\n", 638 dev_err(codec->dev, "Failed to disable supplies: %d\n",
639 ret); 639 ret);
640 return 0; 640 return ret;
641} 641}
642 642
643static int tas2552_resume(struct snd_soc_codec *codec) 643static int tas2552_resume(struct snd_soc_codec *codec)
@@ -653,7 +653,7 @@ static int tas2552_resume(struct snd_soc_codec *codec)
653 ret); 653 ret);
654 } 654 }
655 655
656 return 0; 656 return ret;
657} 657}
658#else 658#else
659#define tas2552_suspend NULL 659#define tas2552_suspend NULL
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 410cae0f2060..628a8eeaab68 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -174,10 +174,9 @@ static const struct snd_soc_dapm_route tlv320aic23_intercon[] = {
174 {"ROUT", NULL, "Output Mixer"}, 174 {"ROUT", NULL, "Output Mixer"},
175 175
176 /* Inputs */ 176 /* Inputs */
177 {"Line Input", "NULL", "LLINEIN"}, 177 {"Line Input", NULL, "LLINEIN"},
178 {"Line Input", "NULL", "RLINEIN"}, 178 {"Line Input", NULL, "RLINEIN"},
179 179 {"Mic Input", NULL, "MICIN"},
180 {"Mic Input", "NULL", "MICIN"},
181 180
182 /* input mux */ 181 /* input mux */
183 {"Capture Source", "Line", "Line Input"}, 182 {"Capture Source", "Line", "Line Input"},
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 748036e851ea..2b6ad09e0886 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -606,6 +606,14 @@ static const struct snd_kcontrol_new twl6040_snd_controls[] = {
606 twl6040_headset_power_get_enum, 606 twl6040_headset_power_get_enum,
607 twl6040_headset_power_put_enum), 607 twl6040_headset_power_put_enum),
608 608
609 /* Left HS PDM data routed to Right HSDAC */
610 SOC_SINGLE("Headset Mono to Stereo Playback Switch",
611 TWL6040_REG_HSRCTL, 7, 1, 0),
612
613 /* Left HF PDM data routed to Right HFDAC */
614 SOC_SINGLE("Handsfree Mono to Stereo Playback Switch",
615 TWL6040_REG_HFRCTL, 5, 1, 0),
616
609 SOC_ENUM_EXT("PLL Selection", twl6040_power_mode_enum, 617 SOC_ENUM_EXT("PLL Selection", twl6040_power_mode_enum,
610 twl6040_pll_get_enum, twl6040_pll_put_enum), 618 twl6040_pll_get_enum, twl6040_pll_put_enum),
611}; 619};
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 2918fdb95e58..61cdc79840e7 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -791,9 +791,16 @@ static const struct i2c_device_id uda1380_i2c_id[] = {
791}; 791};
792MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id); 792MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
793 793
794static const struct of_device_id uda1380_of_match[] = {
795 { .compatible = "nxp,uda1380", },
796 { }
797};
798MODULE_DEVICE_TABLE(of, uda1380_of_match);
799
794static struct i2c_driver uda1380_i2c_driver = { 800static struct i2c_driver uda1380_i2c_driver = {
795 .driver = { 801 .driver = {
796 .name = "uda1380-codec", 802 .name = "uda1380-codec",
803 .of_match_table = uda1380_of_match,
797 }, 804 },
798 .probe = uda1380_i2c_probe, 805 .probe = uda1380_i2c_probe,
799 .remove = uda1380_i2c_remove, 806 .remove = uda1380_i2c_remove,
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 560575000cc5..138a84efdd54 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2014,7 +2014,7 @@ static void wm5100_micd_irq(struct wm5100_priv *wm5100)
2014 2014
2015 ret = regmap_read(wm5100->regmap, WM5100_MIC_DETECT_3, &val); 2015 ret = regmap_read(wm5100->regmap, WM5100_MIC_DETECT_3, &val);
2016 if (ret != 0) { 2016 if (ret != 0) {
2017 dev_err(wm5100->dev, "Failed to read micropone status: %d\n", 2017 dev_err(wm5100->dev, "Failed to read microphone status: %d\n",
2018 ret); 2018 ret);
2019 return; 2019 return;
2020 } 2020 }
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 6e887c2c42b1..237eeb9a8b97 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -24,6 +24,7 @@
24#include <linux/pm.h> 24#include <linux/pm.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/regmap.h> 26#include <linux/regmap.h>
27#include <linux/regulator/consumer.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/irq.h> 29#include <linux/irq.h>
29#include <linux/mutex.h> 30#include <linux/mutex.h>
@@ -115,10 +116,19 @@ static const struct reg_default wm8903_reg_defaults[] = {
115 { 172, 0x0000 }, /* R172 - Analogue Output Bias 0 */ 116 { 172, 0x0000 }, /* R172 - Analogue Output Bias 0 */
116}; 117};
117 118
119#define WM8903_NUM_SUPPLIES 4
120static const char *wm8903_supply_names[WM8903_NUM_SUPPLIES] = {
121 "AVDD",
122 "CPVDD",
123 "DBVDD",
124 "DCVDD",
125};
126
118struct wm8903_priv { 127struct wm8903_priv {
119 struct wm8903_platform_data *pdata; 128 struct wm8903_platform_data *pdata;
120 struct device *dev; 129 struct device *dev;
121 struct regmap *regmap; 130 struct regmap *regmap;
131 struct regulator_bulk_data supplies[WM8903_NUM_SUPPLIES];
122 132
123 int sysclk; 133 int sysclk;
124 int irq; 134 int irq;
@@ -2030,6 +2040,23 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
2030 2040
2031 pdata = wm8903->pdata; 2041 pdata = wm8903->pdata;
2032 2042
2043 for (i = 0; i < ARRAY_SIZE(wm8903->supplies); i++)
2044 wm8903->supplies[i].supply = wm8903_supply_names[i];
2045
2046 ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8903->supplies),
2047 wm8903->supplies);
2048 if (ret != 0) {
2049 dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
2050 return ret;
2051 }
2052
2053 ret = regulator_bulk_enable(ARRAY_SIZE(wm8903->supplies),
2054 wm8903->supplies);
2055 if (ret != 0) {
2056 dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
2057 return ret;
2058 }
2059
2033 ret = regmap_read(wm8903->regmap, WM8903_SW_RESET_AND_ID, &val); 2060 ret = regmap_read(wm8903->regmap, WM8903_SW_RESET_AND_ID, &val);
2034 if (ret != 0) { 2061 if (ret != 0) {
2035 dev_err(&i2c->dev, "Failed to read chip ID: %d\n", ret); 2062 dev_err(&i2c->dev, "Failed to read chip ID: %d\n", ret);
@@ -2160,6 +2187,8 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
2160 2187
2161 return 0; 2188 return 0;
2162err: 2189err:
2190 regulator_bulk_disable(ARRAY_SIZE(wm8903->supplies),
2191 wm8903->supplies);
2163 return ret; 2192 return ret;
2164} 2193}
2165 2194
@@ -2167,6 +2196,8 @@ static int wm8903_i2c_remove(struct i2c_client *client)
2167{ 2196{
2168 struct wm8903_priv *wm8903 = i2c_get_clientdata(client); 2197 struct wm8903_priv *wm8903 = i2c_get_clientdata(client);
2169 2198
2199 regulator_bulk_disable(ARRAY_SIZE(wm8903->supplies),
2200 wm8903->supplies);
2170 if (client->irq) 2201 if (client->irq)
2171 free_irq(client->irq, wm8903); 2202 free_irq(client->irq, wm8903);
2172 wm8903_free_gpio(wm8903); 2203 wm8903_free_gpio(wm8903);
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 3bf081a7e450..9ed455700954 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -604,12 +604,150 @@ static const int bclk_divs[] = {
604 120, 160, 220, 240, 320, 320, 320 604 120, 160, 220, 240, 320, 320, 320
605}; 605};
606 606
607/**
608 * wm8960_configure_sysclk - checks if there is a sysclk frequency available
609 * The sysclk must be chosen such that:
610 * - sysclk = MCLK / sysclk_divs
611 * - lrclk = sysclk / dac_divs
612 * - 10 * bclk = sysclk / bclk_divs
613 *
614 * If we cannot find an exact match for (sysclk, lrclk, bclk)
615 * triplet, we relax the bclk such that bclk is chosen as the
616 * closest available frequency greater than expected bclk.
617 *
618 * @wm8960_priv: wm8960 codec private data
619 * @mclk: MCLK used to derive sysclk
620 * @sysclk_idx: sysclk_divs index for found sysclk
621 * @dac_idx: dac_divs index for found lrclk
622 * @bclk_idx: bclk_divs index for found bclk
623 *
624 * Returns:
625 * -1, in case no sysclk frequency available found
626 * >=0, in case we could derive bclk and lrclk from sysclk using
627 * (@sysclk_idx, @dac_idx, @bclk_idx) dividers
628 */
629static
630int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
631 int *sysclk_idx, int *dac_idx, int *bclk_idx)
632{
633 int sysclk, bclk, lrclk;
634 int i, j, k;
635 int diff, closest = mclk;
636
637 /* marker for no match */
638 *bclk_idx = -1;
639
640 bclk = wm8960->bclk;
641 lrclk = wm8960->lrclk;
642
643 /* check if the sysclk frequency is available. */
644 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
645 if (sysclk_divs[i] == -1)
646 continue;
647 sysclk = mclk / sysclk_divs[i];
648 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
649 if (sysclk != dac_divs[j] * lrclk)
650 continue;
651 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) {
652 diff = sysclk - bclk * bclk_divs[k] / 10;
653 if (diff == 0) {
654 *sysclk_idx = i;
655 *dac_idx = j;
656 *bclk_idx = k;
657 break;
658 }
659 if (diff > 0 && closest > diff) {
660 *sysclk_idx = i;
661 *dac_idx = j;
662 *bclk_idx = k;
663 closest = diff;
664 }
665 }
666 if (k != ARRAY_SIZE(bclk_divs))
667 break;
668 }
669 if (j != ARRAY_SIZE(dac_divs))
670 break;
671 }
672 return *bclk_idx;
673}
674
675/**
676 * wm8960_configure_pll - checks if there is a PLL out frequency available
677 * The PLL out frequency must be chosen such that:
678 * - sysclk = lrclk * dac_divs
679 * - freq_out = sysclk * sysclk_divs
680 * - 10 * sysclk = bclk * bclk_divs
681 *
682 * If we cannot find an exact match for (sysclk, lrclk, bclk)
683 * triplet, we relax the bclk such that bclk is chosen as the
684 * closest available frequency greater than expected bclk.
685 *
686 * @codec: codec structure
687 * @freq_in: input frequency used to derive freq out via PLL
688 * @sysclk_idx: sysclk_divs index for found sysclk
689 * @dac_idx: dac_divs index for found lrclk
690 * @bclk_idx: bclk_divs index for found bclk
691 *
692 * Returns:
693 * < 0, in case no PLL frequency out available was found
694 * >=0, in case we could derive bclk, lrclk, sysclk from PLL out using
695 * (@sysclk_idx, @dac_idx, @bclk_idx) dividers
696 */
697static
698int wm8960_configure_pll(struct snd_soc_codec *codec, int freq_in,
699 int *sysclk_idx, int *dac_idx, int *bclk_idx)
700{
701 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
702 int sysclk, bclk, lrclk, freq_out;
703 int diff, closest, best_freq_out;
704 int i, j, k;
705
706 bclk = wm8960->bclk;
707 lrclk = wm8960->lrclk;
708 closest = freq_in;
709
710 best_freq_out = -EINVAL;
711 *sysclk_idx = *dac_idx = *bclk_idx = -1;
712
713 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
714 if (sysclk_divs[i] == -1)
715 continue;
716 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
717 sysclk = lrclk * dac_divs[j];
718 freq_out = sysclk * sysclk_divs[i];
719
720 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) {
721 if (!is_pll_freq_available(freq_in, freq_out))
722 continue;
723
724 diff = sysclk - bclk * bclk_divs[k] / 10;
725 if (diff == 0) {
726 *sysclk_idx = i;
727 *dac_idx = j;
728 *bclk_idx = k;
729 return freq_out;
730 }
731 if (diff > 0 && closest > diff) {
732 *sysclk_idx = i;
733 *dac_idx = j;
734 *bclk_idx = k;
735 closest = diff;
736 best_freq_out = freq_out;
737 }
738 }
739 }
740 }
741
742 return best_freq_out;
743}
607static int wm8960_configure_clocking(struct snd_soc_codec *codec) 744static int wm8960_configure_clocking(struct snd_soc_codec *codec)
608{ 745{
609 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 746 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
610 int sysclk, bclk, lrclk, freq_out, freq_in; 747 int freq_out, freq_in;
611 u16 iface1 = snd_soc_read(codec, WM8960_IFACE1); 748 u16 iface1 = snd_soc_read(codec, WM8960_IFACE1);
612 int i, j, k; 749 int i, j, k;
750 int ret;
613 751
614 if (!(iface1 & (1<<6))) { 752 if (!(iface1 & (1<<6))) {
615 dev_dbg(codec->dev, 753 dev_dbg(codec->dev,
@@ -623,8 +761,6 @@ static int wm8960_configure_clocking(struct snd_soc_codec *codec)
623 } 761 }
624 762
625 freq_in = wm8960->freq_in; 763 freq_in = wm8960->freq_in;
626 bclk = wm8960->bclk;
627 lrclk = wm8960->lrclk;
628 /* 764 /*
629 * If it's sysclk auto mode, check if the MCLK can provide sysclk or 765 * If it's sysclk auto mode, check if the MCLK can provide sysclk or
630 * not. If MCLK can provide sysclk, using MCLK to provide sysclk 766 * not. If MCLK can provide sysclk, using MCLK to provide sysclk
@@ -643,60 +779,21 @@ static int wm8960_configure_clocking(struct snd_soc_codec *codec)
643 } 779 }
644 780
645 if (wm8960->clk_id != WM8960_SYSCLK_PLL) { 781 if (wm8960->clk_id != WM8960_SYSCLK_PLL) {
646 /* check if the sysclk frequency is available. */ 782 ret = wm8960_configure_sysclk(wm8960, freq_out, &i, &j, &k);
647 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { 783 if (ret >= 0) {
648 if (sysclk_divs[i] == -1)
649 continue;
650 sysclk = freq_out / sysclk_divs[i];
651 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
652 if (sysclk != dac_divs[j] * lrclk)
653 continue;
654 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
655 if (sysclk == bclk * bclk_divs[k] / 10)
656 break;
657 if (k != ARRAY_SIZE(bclk_divs))
658 break;
659 }
660 if (j != ARRAY_SIZE(dac_divs))
661 break;
662 }
663
664 if (i != ARRAY_SIZE(sysclk_divs)) {
665 goto configure_clock; 784 goto configure_clock;
666 } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) { 785 } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
667 dev_err(codec->dev, "failed to configure clock\n"); 786 dev_err(codec->dev, "failed to configure clock\n");
668 return -EINVAL; 787 return -EINVAL;
669 } 788 }
670 } 789 }
671 /* get a available pll out frequency and set pll */
672 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
673 if (sysclk_divs[i] == -1)
674 continue;
675 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
676 sysclk = lrclk * dac_divs[j];
677 freq_out = sysclk * sysclk_divs[i];
678 790
679 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) { 791 freq_out = wm8960_configure_pll(codec, freq_in, &i, &j, &k);
680 if (sysclk == bclk * bclk_divs[k] / 10 && 792 if (freq_out < 0) {
681 is_pll_freq_available(freq_in, freq_out)) { 793 dev_err(codec->dev, "failed to configure clock via PLL\n");
682 wm8960_set_pll(codec, 794 return freq_out;
683 freq_in, freq_out);
684 break;
685 } else {
686 continue;
687 }
688 }
689 if (k != ARRAY_SIZE(bclk_divs))
690 break;
691 }
692 if (j != ARRAY_SIZE(dac_divs))
693 break;
694 }
695
696 if (i == ARRAY_SIZE(sysclk_divs)) {
697 dev_err(codec->dev, "failed to configure clock\n");
698 return -EINVAL;
699 } 795 }
796 wm8960_set_pll(codec, freq_in, freq_out);
700 797
701configure_clock: 798configure_clock:
702 /* configure sysclk clock */ 799 /* configure sysclk clock */
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 90b2d418ef60..cf761e2d7546 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -1071,9 +1071,16 @@ static const struct i2c_device_id wm8978_i2c_id[] = {
1071}; 1071};
1072MODULE_DEVICE_TABLE(i2c, wm8978_i2c_id); 1072MODULE_DEVICE_TABLE(i2c, wm8978_i2c_id);
1073 1073
1074static const struct of_device_id wm8978_of_match[] = {
1075 { .compatible = "wlf,wm8978", },
1076 { }
1077};
1078MODULE_DEVICE_TABLE(of, wm8978_of_match);
1079
1074static struct i2c_driver wm8978_i2c_driver = { 1080static struct i2c_driver wm8978_i2c_driver = {
1075 .driver = { 1081 .driver = {
1076 .name = "wm8978", 1082 .name = "wm8978",
1083 .of_match_table = wm8978_of_match,
1077 }, 1084 },
1078 .probe = wm8978_i2c_probe, 1085 .probe = wm8978_i2c_probe,
1079 .remove = wm8978_i2c_remove, 1086 .remove = wm8978_i2c_remove,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index bbdb72f73df1..20695b691aff 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -112,17 +112,22 @@
112#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */ 112#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
113#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */ 113#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
114 114
115#define ADSP2_CONTROL 0x0 115#define ADSP2_CONTROL 0x0
116#define ADSP2_CLOCKING 0x1 116#define ADSP2_CLOCKING 0x1
117#define ADSP2_STATUS1 0x4 117#define ADSP2V2_CLOCKING 0x2
118#define ADSP2_WDMA_CONFIG_1 0x30 118#define ADSP2_STATUS1 0x4
119#define ADSP2_WDMA_CONFIG_2 0x31 119#define ADSP2_WDMA_CONFIG_1 0x30
120#define ADSP2_RDMA_CONFIG_1 0x34 120#define ADSP2_WDMA_CONFIG_2 0x31
121 121#define ADSP2V2_WDMA_CONFIG_2 0x32
122#define ADSP2_SCRATCH0 0x40 122#define ADSP2_RDMA_CONFIG_1 0x34
123#define ADSP2_SCRATCH1 0x41 123
124#define ADSP2_SCRATCH2 0x42 124#define ADSP2_SCRATCH0 0x40
125#define ADSP2_SCRATCH3 0x43 125#define ADSP2_SCRATCH1 0x41
126#define ADSP2_SCRATCH2 0x42
127#define ADSP2_SCRATCH3 0x43
128
129#define ADSP2V2_SCRATCH0_1 0x40
130#define ADSP2V2_SCRATCH2_3 0x42
126 131
127/* 132/*
128 * ADSP2 Control 133 * ADSP2 Control
@@ -153,6 +158,17 @@
153#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */ 158#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
154 159
155/* 160/*
161 * ADSP2V2 clocking
162 */
163#define ADSP2V2_CLK_SEL_MASK 0x70000 /* CLK_SEL_ENA */
164#define ADSP2V2_CLK_SEL_SHIFT 16 /* CLK_SEL_ENA */
165#define ADSP2V2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
166
167#define ADSP2V2_RATE_MASK 0x7800 /* DSP_RATE */
168#define ADSP2V2_RATE_SHIFT 11 /* DSP_RATE */
169#define ADSP2V2_RATE_WIDTH 4 /* DSP_RATE */
170
171/*
156 * ADSP2 Status 1 172 * ADSP2 Status 1
157 */ 173 */
158#define ADSP2_RAM_RDY 0x0001 174#define ADSP2_RAM_RDY 0x0001
@@ -160,6 +176,37 @@
160#define ADSP2_RAM_RDY_SHIFT 0 176#define ADSP2_RAM_RDY_SHIFT 0
161#define ADSP2_RAM_RDY_WIDTH 1 177#define ADSP2_RAM_RDY_WIDTH 1
162 178
179/*
180 * ADSP2 Lock support
181 */
182#define ADSP2_LOCK_CODE_0 0x5555
183#define ADSP2_LOCK_CODE_1 0xAAAA
184
185#define ADSP2_WATCHDOG 0x0A
186#define ADSP2_BUS_ERR_ADDR 0x52
187#define ADSP2_REGION_LOCK_STATUS 0x64
188#define ADSP2_LOCK_REGION_1_LOCK_REGION_0 0x66
189#define ADSP2_LOCK_REGION_3_LOCK_REGION_2 0x68
190#define ADSP2_LOCK_REGION_5_LOCK_REGION_4 0x6A
191#define ADSP2_LOCK_REGION_7_LOCK_REGION_6 0x6C
192#define ADSP2_LOCK_REGION_9_LOCK_REGION_8 0x6E
193#define ADSP2_LOCK_REGION_CTRL 0x7A
194#define ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR 0x7C
195
196#define ADSP2_REGION_LOCK_ERR_MASK 0x8000
197#define ADSP2_SLAVE_ERR_MASK 0x4000
198#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
199#define ADSP2_CTRL_ERR_PAUSE_ENA 0x0002
200#define ADSP2_CTRL_ERR_EINT 0x0001
201
202#define ADSP2_BUS_ERR_ADDR_MASK 0x00FFFFFF
203#define ADSP2_XMEM_ERR_ADDR_MASK 0x0000FFFF
204#define ADSP2_PMEM_ERR_ADDR_MASK 0x7FFF0000
205#define ADSP2_PMEM_ERR_ADDR_SHIFT 16
206#define ADSP2_WDT_ENA_MASK 0xFFFFFFFD
207
208#define ADSP2_LOCK_REGION_SHIFT 16
209
163#define ADSP_MAX_STD_CTRL_SIZE 512 210#define ADSP_MAX_STD_CTRL_SIZE 512
164 211
165#define WM_ADSP_ACKED_CTL_TIMEOUT_MS 100 212#define WM_ADSP_ACKED_CTL_TIMEOUT_MS 100
@@ -683,6 +730,9 @@ static const struct soc_enum wm_adsp_fw_enum[] = {
683 SOC_ENUM_SINGLE(0, 1, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text), 730 SOC_ENUM_SINGLE(0, 1, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
684 SOC_ENUM_SINGLE(0, 2, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text), 731 SOC_ENUM_SINGLE(0, 2, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
685 SOC_ENUM_SINGLE(0, 3, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text), 732 SOC_ENUM_SINGLE(0, 3, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
733 SOC_ENUM_SINGLE(0, 4, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
734 SOC_ENUM_SINGLE(0, 5, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
735 SOC_ENUM_SINGLE(0, 6, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
686}; 736};
687 737
688const struct snd_kcontrol_new wm_adsp_fw_controls[] = { 738const struct snd_kcontrol_new wm_adsp_fw_controls[] = {
@@ -694,6 +744,12 @@ const struct snd_kcontrol_new wm_adsp_fw_controls[] = {
694 wm_adsp_fw_get, wm_adsp_fw_put), 744 wm_adsp_fw_get, wm_adsp_fw_put),
695 SOC_ENUM_EXT("DSP4 Firmware", wm_adsp_fw_enum[3], 745 SOC_ENUM_EXT("DSP4 Firmware", wm_adsp_fw_enum[3],
696 wm_adsp_fw_get, wm_adsp_fw_put), 746 wm_adsp_fw_get, wm_adsp_fw_put),
747 SOC_ENUM_EXT("DSP5 Firmware", wm_adsp_fw_enum[4],
748 wm_adsp_fw_get, wm_adsp_fw_put),
749 SOC_ENUM_EXT("DSP6 Firmware", wm_adsp_fw_enum[5],
750 wm_adsp_fw_get, wm_adsp_fw_put),
751 SOC_ENUM_EXT("DSP7 Firmware", wm_adsp_fw_enum[6],
752 wm_adsp_fw_get, wm_adsp_fw_put),
697}; 753};
698EXPORT_SYMBOL_GPL(wm_adsp_fw_controls); 754EXPORT_SYMBOL_GPL(wm_adsp_fw_controls);
699 755
@@ -750,6 +806,29 @@ static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
750 be16_to_cpu(scratch[3])); 806 be16_to_cpu(scratch[3]));
751} 807}
752 808
809static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
810{
811 u32 scratch[2];
812 int ret;
813
814 ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
815 scratch, sizeof(scratch));
816
817 if (ret) {
818 adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
819 return;
820 }
821
822 scratch[0] = be32_to_cpu(scratch[0]);
823 scratch[1] = be32_to_cpu(scratch[1]);
824
825 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
826 scratch[0] & 0xFFFF,
827 scratch[0] >> 16,
828 scratch[1] & 0xFFFF,
829 scratch[1] >> 16);
830}
831
753static inline struct wm_coeff_ctl *bytes_ext_to_ctl(struct soc_bytes_ext *ext) 832static inline struct wm_coeff_ctl *bytes_ext_to_ctl(struct soc_bytes_ext *ext)
754{ 833{
755 return container_of(ext, struct wm_coeff_ctl, bytes_ext); 834 return container_of(ext, struct wm_coeff_ctl, bytes_ext);
@@ -2435,10 +2514,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
2435 unsigned int val; 2514 unsigned int val;
2436 int ret, count; 2515 int ret, count;
2437 2516
2438 ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL, 2517 switch (dsp->rev) {
2439 ADSP2_SYS_ENA, ADSP2_SYS_ENA); 2518 case 0:
2440 if (ret != 0) 2519 ret = regmap_update_bits_async(dsp->regmap,
2441 return ret; 2520 dsp->base + ADSP2_CONTROL,
2521 ADSP2_SYS_ENA, ADSP2_SYS_ENA);
2522 if (ret != 0)
2523 return ret;
2524 break;
2525 default:
2526 break;
2527 }
2442 2528
2443 /* Wait for the RAM to start, should be near instantaneous */ 2529 /* Wait for the RAM to start, should be near instantaneous */
2444 for (count = 0; count < 10; ++count) { 2530 for (count = 0; count < 10; ++count) {
@@ -2497,11 +2583,17 @@ static void wm_adsp2_boot_work(struct work_struct *work)
2497 if (ret != 0) 2583 if (ret != 0)
2498 goto err_ena; 2584 goto err_ena;
2499 2585
2500 /* Turn DSP back off until we are ready to run */ 2586 switch (dsp->rev) {
2501 ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, 2587 case 0:
2502 ADSP2_SYS_ENA, 0); 2588 /* Turn DSP back off until we are ready to run */
2503 if (ret != 0) 2589 ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
2504 goto err_ena; 2590 ADSP2_SYS_ENA, 0);
2591 if (ret != 0)
2592 goto err_ena;
2593 break;
2594 default:
2595 break;
2596 }
2505 2597
2506 dsp->booted = true; 2598 dsp->booted = true;
2507 2599
@@ -2523,12 +2615,21 @@ static void wm_adsp2_set_dspclk(struct wm_adsp *dsp, unsigned int freq)
2523{ 2615{
2524 int ret; 2616 int ret;
2525 2617
2526 ret = regmap_update_bits_async(dsp->regmap, 2618 switch (dsp->rev) {
2527 dsp->base + ADSP2_CLOCKING, 2619 case 0:
2528 ADSP2_CLK_SEL_MASK, 2620 ret = regmap_update_bits_async(dsp->regmap,
2529 freq << ADSP2_CLK_SEL_SHIFT); 2621 dsp->base + ADSP2_CLOCKING,
2530 if (ret != 0) 2622 ADSP2_CLK_SEL_MASK,
2531 adsp_err(dsp, "Failed to set clock rate: %d\n", ret); 2623 freq << ADSP2_CLK_SEL_SHIFT);
2624 if (ret) {
2625 adsp_err(dsp, "Failed to set clock rate: %d\n", ret);
2626 return;
2627 }
2628 break;
2629 default:
2630 /* clock is handled by parent codec driver */
2631 break;
2632 }
2532} 2633}
2533 2634
2534int wm_adsp2_preloader_get(struct snd_kcontrol *kcontrol, 2635int wm_adsp2_preloader_get(struct snd_kcontrol *kcontrol,
@@ -2568,6 +2669,18 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
2568} 2669}
2569EXPORT_SYMBOL_GPL(wm_adsp2_preloader_put); 2670EXPORT_SYMBOL_GPL(wm_adsp2_preloader_put);
2570 2671
2672static void wm_adsp_stop_watchdog(struct wm_adsp *dsp)
2673{
2674 switch (dsp->rev) {
2675 case 0:
2676 case 1:
2677 return;
2678 default:
2679 regmap_update_bits(dsp->regmap, dsp->base + ADSP2_WATCHDOG,
2680 ADSP2_WDT_ENA_MASK, 0);
2681 }
2682}
2683
2571int wm_adsp2_early_event(struct snd_soc_dapm_widget *w, 2684int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
2572 struct snd_kcontrol *kcontrol, int event, 2685 struct snd_kcontrol *kcontrol, int event,
2573 unsigned int freq) 2686 unsigned int freq)
@@ -2640,6 +2753,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
2640 if (ret != 0) 2753 if (ret != 0)
2641 goto err; 2754 goto err;
2642 2755
2756 wm_adsp2_lock(dsp, dsp->lock_regions);
2757
2643 ret = regmap_update_bits(dsp->regmap, 2758 ret = regmap_update_bits(dsp->regmap,
2644 dsp->base + ADSP2_CONTROL, 2759 dsp->base + ADSP2_CONTROL,
2645 ADSP2_CORE_ENA | ADSP2_START, 2760 ADSP2_CORE_ENA | ADSP2_START,
@@ -2663,23 +2778,49 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
2663 /* Tell the firmware to cleanup */ 2778 /* Tell the firmware to cleanup */
2664 wm_adsp_signal_event_controls(dsp, WM_ADSP_FW_EVENT_SHUTDOWN); 2779 wm_adsp_signal_event_controls(dsp, WM_ADSP_FW_EVENT_SHUTDOWN);
2665 2780
2781 wm_adsp_stop_watchdog(dsp);
2782
2666 /* Log firmware state, it can be useful for analysis */ 2783 /* Log firmware state, it can be useful for analysis */
2667 wm_adsp2_show_fw_status(dsp); 2784 switch (dsp->rev) {
2785 case 0:
2786 wm_adsp2_show_fw_status(dsp);
2787 break;
2788 default:
2789 wm_adsp2v2_show_fw_status(dsp);
2790 break;
2791 }
2668 2792
2669 mutex_lock(&dsp->pwr_lock); 2793 mutex_lock(&dsp->pwr_lock);
2670 2794
2671 dsp->running = false; 2795 dsp->running = false;
2672 2796
2673 regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, 2797 regmap_update_bits(dsp->regmap,
2798 dsp->base + ADSP2_CONTROL,
2674 ADSP2_CORE_ENA | ADSP2_START, 0); 2799 ADSP2_CORE_ENA | ADSP2_START, 0);
2675 2800
2676 /* Make sure DMAs are quiesced */ 2801 /* Make sure DMAs are quiesced */
2677 regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0); 2802 switch (dsp->rev) {
2678 regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0); 2803 case 0:
2679 regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0); 2804 regmap_write(dsp->regmap,
2680 2805 dsp->base + ADSP2_RDMA_CONFIG_1, 0);
2681 regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, 2806 regmap_write(dsp->regmap,
2682 ADSP2_SYS_ENA, 0); 2807 dsp->base + ADSP2_WDMA_CONFIG_1, 0);
2808 regmap_write(dsp->regmap,
2809 dsp->base + ADSP2_WDMA_CONFIG_2, 0);
2810
2811 regmap_update_bits(dsp->regmap,
2812 dsp->base + ADSP2_CONTROL,
2813 ADSP2_SYS_ENA, 0);
2814 break;
2815 default:
2816 regmap_write(dsp->regmap,
2817 dsp->base + ADSP2_RDMA_CONFIG_1, 0);
2818 regmap_write(dsp->regmap,
2819 dsp->base + ADSP2_WDMA_CONFIG_1, 0);
2820 regmap_write(dsp->regmap,
2821 dsp->base + ADSP2V2_WDMA_CONFIG_2, 0);
2822 break;
2823 }
2683 2824
2684 if (wm_adsp_fw[dsp->fw].num_caps != 0) 2825 if (wm_adsp_fw[dsp->fw].num_caps != 0)
2685 wm_adsp_buffer_free(dsp); 2826 wm_adsp_buffer_free(dsp);
@@ -2732,15 +2873,22 @@ int wm_adsp2_init(struct wm_adsp *dsp)
2732{ 2873{
2733 int ret; 2874 int ret;
2734 2875
2735 /* 2876 switch (dsp->rev) {
2736 * Disable the DSP memory by default when in reset for a small 2877 case 0:
2737 * power saving. 2878 /*
2738 */ 2879 * Disable the DSP memory by default when in reset for a small
2739 ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, 2880 * power saving.
2740 ADSP2_MEM_ENA, 0); 2881 */
2741 if (ret != 0) { 2882 ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
2742 adsp_err(dsp, "Failed to clear memory retention: %d\n", ret); 2883 ADSP2_MEM_ENA, 0);
2743 return ret; 2884 if (ret) {
2885 adsp_err(dsp,
2886 "Failed to clear memory retention: %d\n", ret);
2887 return ret;
2888 }
2889 break;
2890 default:
2891 break;
2744 } 2892 }
2745 2893
2746 INIT_LIST_HEAD(&dsp->alg_regions); 2894 INIT_LIST_HEAD(&dsp->alg_regions);
@@ -3523,4 +3671,94 @@ int wm_adsp_compr_copy(struct snd_compr_stream *stream, char __user *buf,
3523} 3671}
3524EXPORT_SYMBOL_GPL(wm_adsp_compr_copy); 3672EXPORT_SYMBOL_GPL(wm_adsp_compr_copy);
3525 3673
3674int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions)
3675{
3676 struct regmap *regmap = dsp->regmap;
3677 unsigned int code0, code1, lock_reg;
3678
3679 if (!(lock_regions & WM_ADSP2_REGION_ALL))
3680 return 0;
3681
3682 lock_regions &= WM_ADSP2_REGION_ALL;
3683 lock_reg = dsp->base + ADSP2_LOCK_REGION_1_LOCK_REGION_0;
3684
3685 while (lock_regions) {
3686 code0 = code1 = 0;
3687 if (lock_regions & BIT(0)) {
3688 code0 = ADSP2_LOCK_CODE_0;
3689 code1 = ADSP2_LOCK_CODE_1;
3690 }
3691 if (lock_regions & BIT(1)) {
3692 code0 |= ADSP2_LOCK_CODE_0 << ADSP2_LOCK_REGION_SHIFT;
3693 code1 |= ADSP2_LOCK_CODE_1 << ADSP2_LOCK_REGION_SHIFT;
3694 }
3695 regmap_write(regmap, lock_reg, code0);
3696 regmap_write(regmap, lock_reg, code1);
3697 lock_regions >>= 2;
3698 lock_reg += 2;
3699 }
3700
3701 return 0;
3702}
3703EXPORT_SYMBOL_GPL(wm_adsp2_lock);
3704
3705irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
3706{
3707 unsigned int val;
3708 struct regmap *regmap = dsp->regmap;
3709 int ret = 0;
3710
3711 ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
3712 if (ret) {
3713 adsp_err(dsp,
3714 "Failed to read Region Lock Ctrl register: %d\n", ret);
3715 return IRQ_HANDLED;
3716 }
3717
3718 if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
3719 adsp_err(dsp, "watchdog timeout error\n");
3720 wm_adsp_stop_watchdog(dsp);
3721 }
3722
3723 if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
3724 if (val & ADSP2_SLAVE_ERR_MASK)
3725 adsp_err(dsp, "bus error: slave error\n");
3726 else
3727 adsp_err(dsp, "bus error: region lock error\n");
3728
3729 ret = regmap_read(regmap, dsp->base + ADSP2_BUS_ERR_ADDR, &val);
3730 if (ret) {
3731 adsp_err(dsp,
3732 "Failed to read Bus Err Addr register: %d\n",
3733 ret);
3734 return IRQ_HANDLED;
3735 }
3736
3737 adsp_err(dsp, "bus error address = 0x%x\n",
3738 val & ADSP2_BUS_ERR_ADDR_MASK);
3739
3740 ret = regmap_read(regmap,
3741 dsp->base + ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
3742 &val);
3743 if (ret) {
3744 adsp_err(dsp,
3745 "Failed to read Pmem Xmem Err Addr register: %d\n",
3746 ret);
3747 return IRQ_HANDLED;
3748 }
3749
3750 adsp_err(dsp, "xmem error address = 0x%x\n",
3751 val & ADSP2_XMEM_ERR_ADDR_MASK);
3752 adsp_err(dsp, "pmem error address = 0x%x\n",
3753 (val & ADSP2_PMEM_ERR_ADDR_MASK) >>
3754 ADSP2_PMEM_ERR_ADDR_SHIFT);
3755 }
3756
3757 regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
3758 ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
3759
3760 return IRQ_HANDLED;
3761}
3762EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
3763
3526MODULE_LICENSE("GPL v2"); 3764MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 3706b11053a3..41cc11c19b83 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -23,6 +23,23 @@
23#define WM_ADSP_COMPR_OK 0 23#define WM_ADSP_COMPR_OK 0
24#define WM_ADSP_COMPR_VOICE_TRIGGER 1 24#define WM_ADSP_COMPR_VOICE_TRIGGER 1
25 25
26#define WM_ADSP2_REGION_0 BIT(0)
27#define WM_ADSP2_REGION_1 BIT(1)
28#define WM_ADSP2_REGION_2 BIT(2)
29#define WM_ADSP2_REGION_3 BIT(3)
30#define WM_ADSP2_REGION_4 BIT(4)
31#define WM_ADSP2_REGION_5 BIT(5)
32#define WM_ADSP2_REGION_6 BIT(6)
33#define WM_ADSP2_REGION_7 BIT(7)
34#define WM_ADSP2_REGION_8 BIT(8)
35#define WM_ADSP2_REGION_9 BIT(9)
36#define WM_ADSP2_REGION_1_9 (WM_ADSP2_REGION_1 | \
37 WM_ADSP2_REGION_2 | WM_ADSP2_REGION_3 | \
38 WM_ADSP2_REGION_4 | WM_ADSP2_REGION_5 | \
39 WM_ADSP2_REGION_6 | WM_ADSP2_REGION_7 | \
40 WM_ADSP2_REGION_8 | WM_ADSP2_REGION_9)
41#define WM_ADSP2_REGION_ALL (WM_ADSP2_REGION_0 | WM_ADSP2_REGION_1_9)
42
26struct wm_adsp_region { 43struct wm_adsp_region {
27 int type; 44 int type;
28 unsigned int base; 45 unsigned int base;
@@ -40,6 +57,7 @@ struct wm_adsp_compr_buf;
40 57
41struct wm_adsp { 58struct wm_adsp {
42 const char *part; 59 const char *part;
60 int rev;
43 int num; 61 int num;
44 int type; 62 int type;
45 struct device *dev; 63 struct device *dev;
@@ -75,6 +93,8 @@ struct wm_adsp {
75 93
76 struct mutex pwr_lock; 94 struct mutex pwr_lock;
77 95
96 unsigned int lock_regions;
97
78#ifdef CONFIG_DEBUG_FS 98#ifdef CONFIG_DEBUG_FS
79 struct dentry *debugfs_root; 99 struct dentry *debugfs_root;
80 char *wmfw_file_name; 100 char *wmfw_file_name;
@@ -113,6 +133,10 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
113int wm_adsp2_early_event(struct snd_soc_dapm_widget *w, 133int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
114 struct snd_kcontrol *kcontrol, int event, 134 struct snd_kcontrol *kcontrol, int event,
115 unsigned int freq); 135 unsigned int freq);
136
137int wm_adsp2_lock(struct wm_adsp *adsp, unsigned int regions);
138irqreturn_t wm_adsp2_bus_error(struct wm_adsp *adsp);
139
116int wm_adsp2_event(struct snd_soc_dapm_widget *w, 140int wm_adsp2_event(struct snd_soc_dapm_widget *w,
117 struct snd_kcontrol *kcontrol, int event); 141 struct snd_kcontrol *kcontrol, int event);
118 142
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
index c297efe43861..c6fd95fa5ca6 100644
--- a/sound/soc/dwc/Kconfig
+++ b/sound/soc/dwc/Kconfig
@@ -8,10 +8,10 @@ config SND_DESIGNWARE_I2S
8 maximum of 8 channels each for play and record. 8 maximum of 8 channels each for play and record.
9 9
10config SND_DESIGNWARE_PCM 10config SND_DESIGNWARE_PCM
11 tristate "PCM PIO extension for I2S driver" 11 bool "PCM PIO extension for I2S driver"
12 depends on SND_DESIGNWARE_I2S 12 depends on SND_DESIGNWARE_I2S
13 help 13 help
14 Say Y, M or N if you want to add a custom ALSA extension that registers 14 Say Y or N if you want to add a custom ALSA extension that registers
15 a PCM and uses PIO to transfer data. 15 a PCM and uses PIO to transfer data.
16 16
17 This functionality is specially suited for I2S devices that don't have 17 This functionality is specially suited for I2S devices that don't have
diff --git a/sound/soc/dwc/Makefile b/sound/soc/dwc/Makefile
index 38f1ca31c5fa..3e24c0ff95fb 100644
--- a/sound/soc/dwc/Makefile
+++ b/sound/soc/dwc/Makefile
@@ -1,5 +1,5 @@
1# SYNOPSYS Platform Support 1# SYNOPSYS Platform Support
2obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_i2s.o 2obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_i2s.o
3ifdef CONFIG_SND_DESIGNWARE_PCM 3
4obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_pcm.o 4designware_i2s-y := dwc-i2s.o
5endif 5designware_i2s-$(CONFIG_SND_DESIGNWARE_PCM) += dwc-pcm.o
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/dwc-i2s.c
index 9c46e4112026..9c46e4112026 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
diff --git a/sound/soc/dwc/designware_pcm.c b/sound/soc/dwc/dwc-pcm.c
index 459ec861e6b6..406fd867117b 100644
--- a/sound/soc/dwc/designware_pcm.c
+++ b/sound/soc/dwc/dwc-pcm.c
@@ -129,13 +129,11 @@ void dw_pcm_push_tx(struct dw_i2s_dev *dev)
129{ 129{
130 dw_pcm_transfer(dev, true); 130 dw_pcm_transfer(dev, true);
131} 131}
132EXPORT_SYMBOL_GPL(dw_pcm_push_tx);
133 132
134void dw_pcm_pop_rx(struct dw_i2s_dev *dev) 133void dw_pcm_pop_rx(struct dw_i2s_dev *dev)
135{ 134{
136 dw_pcm_transfer(dev, false); 135 dw_pcm_transfer(dev, false);
137} 136}
138EXPORT_SYMBOL_GPL(dw_pcm_pop_rx);
139 137
140static int dw_pcm_open(struct snd_pcm_substream *substream) 138static int dw_pcm_open(struct snd_pcm_substream *substream)
141{ 139{
@@ -281,4 +279,3 @@ int dw_pcm_register(struct platform_device *pdev)
281{ 279{
282 return devm_snd_soc_register_platform(&pdev->dev, &dw_pcm_platform); 280 return devm_snd_soc_register_platform(&pdev->dev, &dw_pcm_platform);
283} 281}
284EXPORT_SYMBOL_GPL(dw_pcm_register);
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 883087f2b092..84ef6385736c 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -64,7 +64,7 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
64 return 0; 64 return 0;
65} 65}
66 66
67static struct snd_soc_ops eukrea_tlv320_snd_ops = { 67static const struct snd_soc_ops eukrea_tlv320_snd_ops = {
68 .hw_params = eukrea_tlv320_hw_params, 68 .hw_params = eukrea_tlv320_hw_params,
69}; 69};
70 70
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index dc30d780f874..282d841840b1 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -76,7 +76,7 @@ static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
76 pair->dma_chan[!dir], runtime->dma_addr, 76 pair->dma_chan[!dir], runtime->dma_addr,
77 snd_pcm_lib_buffer_bytes(substream), 77 snd_pcm_lib_buffer_bytes(substream),
78 snd_pcm_lib_period_bytes(substream), 78 snd_pcm_lib_period_bytes(substream),
79 dir == OUT ? DMA_TO_DEVICE : DMA_FROM_DEVICE, flags); 79 dir == OUT ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, flags);
80 if (!pair->desc[!dir]) { 80 if (!pair->desc[!dir]) {
81 dev_err(dev, "failed to prepare slave DMA for Front-End\n"); 81 dev_err(dev, "failed to prepare slave DMA for Front-End\n");
82 return -ENOMEM; 82 return -ENOMEM;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38bfd46f4ad8..809a069d490b 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -19,7 +19,6 @@
19#include "fsl_esai.h" 19#include "fsl_esai.h"
20#include "imx-pcm.h" 20#include "imx-pcm.h"
21 21
22#define FSL_ESAI_RATES SNDRV_PCM_RATE_8000_192000
23#define FSL_ESAI_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ 22#define FSL_ESAI_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
24 SNDRV_PCM_FMTBIT_S16_LE | \ 23 SNDRV_PCM_FMTBIT_S16_LE | \
25 SNDRV_PCM_FMTBIT_S20_3LE | \ 24 SNDRV_PCM_FMTBIT_S20_3LE | \
@@ -647,14 +646,14 @@ static struct snd_soc_dai_driver fsl_esai_dai = {
647 .stream_name = "CPU-Playback", 646 .stream_name = "CPU-Playback",
648 .channels_min = 1, 647 .channels_min = 1,
649 .channels_max = 12, 648 .channels_max = 12,
650 .rates = FSL_ESAI_RATES, 649 .rates = SNDRV_PCM_RATE_8000_192000,
651 .formats = FSL_ESAI_FORMATS, 650 .formats = FSL_ESAI_FORMATS,
652 }, 651 },
653 .capture = { 652 .capture = {
654 .stream_name = "CPU-Capture", 653 .stream_name = "CPU-Capture",
655 .channels_min = 1, 654 .channels_min = 1,
656 .channels_max = 8, 655 .channels_max = 8,
657 .rates = FSL_ESAI_RATES, 656 .rates = SNDRV_PCM_RATE_8000_192000,
658 .formats = FSL_ESAI_FORMATS, 657 .formats = FSL_ESAI_FORMATS,
659 }, 658 },
660 .ops = &fsl_esai_dai_ops, 659 .ops = &fsl_esai_dai_ops,
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index fde08660b63b..173cb8496641 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -35,6 +35,7 @@
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/clk.h> 37#include <linux/clk.h>
38#include <linux/ctype.h>
38#include <linux/device.h> 39#include <linux/device.h>
39#include <linux/delay.h> 40#include <linux/delay.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
@@ -55,16 +56,6 @@
55#include "imx-pcm.h" 56#include "imx-pcm.h"
56 57
57/** 58/**
58 * FSLSSI_I2S_RATES: sample rates supported by the I2S
59 *
60 * This driver currently only supports the SSI running in I2S slave mode,
61 * which means the codec determines the sample rate. Therefore, we tell
62 * ALSA that we support all rates and let the codec driver decide what rates
63 * are really supported.
64 */
65#define FSLSSI_I2S_RATES SNDRV_PCM_RATE_CONTINUOUS
66
67/**
68 * FSLSSI_I2S_FORMATS: audio formats supported by the SSI 59 * FSLSSI_I2S_FORMATS: audio formats supported by the SSI
69 * 60 *
70 * The SSI has a limitation in that the samples must be in the same byte 61 * The SSI has a limitation in that the samples must be in the same byte
@@ -1212,14 +1203,14 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
1212 .stream_name = "CPU-Playback", 1203 .stream_name = "CPU-Playback",
1213 .channels_min = 1, 1204 .channels_min = 1,
1214 .channels_max = 32, 1205 .channels_max = 32,
1215 .rates = FSLSSI_I2S_RATES, 1206 .rates = SNDRV_PCM_RATE_CONTINUOUS,
1216 .formats = FSLSSI_I2S_FORMATS, 1207 .formats = FSLSSI_I2S_FORMATS,
1217 }, 1208 },
1218 .capture = { 1209 .capture = {
1219 .stream_name = "CPU-Capture", 1210 .stream_name = "CPU-Capture",
1220 .channels_min = 1, 1211 .channels_min = 1,
1221 .channels_max = 32, 1212 .channels_max = 32,
1222 .rates = FSLSSI_I2S_RATES, 1213 .rates = SNDRV_PCM_RATE_CONTINUOUS,
1223 .formats = FSLSSI_I2S_FORMATS, 1214 .formats = FSLSSI_I2S_FORMATS,
1224 }, 1215 },
1225 .ops = &fsl_ssi_dai_ops, 1216 .ops = &fsl_ssi_dai_ops,
@@ -1325,14 +1316,10 @@ static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
1325 */ 1316 */
1326static void make_lowercase(char *s) 1317static void make_lowercase(char *s)
1327{ 1318{
1328 char *p = s; 1319 if (!s)
1329 char c; 1320 return;
1330 1321 for (; *s; s++)
1331 while ((c = *p)) { 1322 *s = tolower(*s);
1332 if ((c >= 'A') && (c <= 'Z'))
1333 *p = c + ('a' - 'A');
1334 p++;
1335 }
1336} 1323}
1337 1324
1338static int fsl_ssi_imx_probe(struct platform_device *pdev, 1325static int fsl_ssi_imx_probe(struct platform_device *pdev,
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index bb0459018b45..9d19b808f634 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -48,7 +48,7 @@ static int imx_mc13783_hifi_hw_params(struct snd_pcm_substream *substream,
48 return snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 16); 48 return snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 16);
49} 49}
50 50
51static struct snd_soc_ops imx_mc13783_hifi_ops = { 51static const struct snd_soc_ops imx_mc13783_hifi_ops = {
52 .hw_params = imx_mc13783_hifi_hw_params, 52 .hw_params = imx_mc13783_hifi_hw_params,
53}; 53};
54 54
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index f3d3d1ffa84e..314814ddd2b0 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -33,48 +33,20 @@ static bool filter(struct dma_chan *chan, void *param)
33 return true; 33 return true;
34} 34}
35 35
36static const struct snd_pcm_hardware imx_pcm_hardware = {
37 .info = SNDRV_PCM_INFO_INTERLEAVED |
38 SNDRV_PCM_INFO_BLOCK_TRANSFER |
39 SNDRV_PCM_INFO_MMAP |
40 SNDRV_PCM_INFO_MMAP_VALID |
41 SNDRV_PCM_INFO_PAUSE |
42 SNDRV_PCM_INFO_RESUME,
43 .buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
44 .period_bytes_min = 128,
45 .period_bytes_max = 65535, /* Limited by SDMA engine */
46 .periods_min = 2,
47 .periods_max = 255,
48 .fifo_size = 0,
49};
50
51static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = { 36static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = {
52 .pcm_hardware = &imx_pcm_hardware,
53 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, 37 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
54 .compat_filter_fn = filter, 38 .compat_filter_fn = filter,
55 .prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
56}; 39};
57 40
58int imx_pcm_dma_init(struct platform_device *pdev, size_t size) 41int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
59{ 42{
60 struct snd_dmaengine_pcm_config *config; 43 struct snd_dmaengine_pcm_config *config;
61 struct snd_pcm_hardware *pcm_hardware;
62 44
63 config = devm_kzalloc(&pdev->dev, 45 config = devm_kzalloc(&pdev->dev,
64 sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL); 46 sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
65 if (!config) 47 if (!config)
66 return -ENOMEM; 48 return -ENOMEM;
67 *config = imx_dmaengine_pcm_config; 49 *config = imx_dmaengine_pcm_config;
68 if (size)
69 config->prealloc_buffer_size = size;
70
71 pcm_hardware = devm_kzalloc(&pdev->dev,
72 sizeof(struct snd_pcm_hardware), GFP_KERNEL);
73 *pcm_hardware = imx_pcm_hardware;
74 if (size)
75 pcm_hardware->buffer_bytes_max = size;
76
77 config->pcm_hardware = pcm_hardware;
78 50
79 return devm_snd_dmaengine_pcm_register(&pdev->dev, 51 return devm_snd_dmaengine_pcm_register(&pdev->dev,
80 config, 52 config,
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index dac6688540dc..92410f7ca1fa 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -282,7 +282,7 @@ static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
282 return 0; 282 return 0;
283} 283}
284 284
285static int ssi_irq = 0; 285static int ssi_irq;
286 286
287static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) 287static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
288{ 288{
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 1b60958e2080..206b898e554c 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -33,14 +33,14 @@ struct imx_wm8962_data {
33 struct snd_soc_card card; 33 struct snd_soc_card card;
34 char codec_dai_name[DAI_NAME_SIZE]; 34 char codec_dai_name[DAI_NAME_SIZE];
35 char platform_name[DAI_NAME_SIZE]; 35 char platform_name[DAI_NAME_SIZE];
36 struct clk *codec_clk;
37 unsigned int clk_frequency; 36 unsigned int clk_frequency;
38}; 37};
39 38
40struct imx_priv { 39struct imx_priv {
41 struct platform_device *pdev; 40 struct platform_device *pdev;
41 int sample_rate;
42 snd_pcm_format_t sample_format;
42}; 43};
43static struct imx_priv card_priv;
44 44
45static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = { 45static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = {
46 SND_SOC_DAPM_HP("Headphone Jack", NULL), 46 SND_SOC_DAPM_HP("Headphone Jack", NULL),
@@ -49,14 +49,14 @@ static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = {
49 SND_SOC_DAPM_MIC("DMIC", NULL), 49 SND_SOC_DAPM_MIC("DMIC", NULL),
50}; 50};
51 51
52static int sample_rate = 44100;
53static snd_pcm_format_t sample_format = SNDRV_PCM_FORMAT_S16_LE;
54
55static int imx_hifi_hw_params(struct snd_pcm_substream *substream, 52static int imx_hifi_hw_params(struct snd_pcm_substream *substream,
56 struct snd_pcm_hw_params *params) 53 struct snd_pcm_hw_params *params)
57{ 54{
58 sample_rate = params_rate(params); 55 struct snd_soc_pcm_runtime *rtd = substream->private_data;
59 sample_format = params_format(params); 56 struct imx_priv *priv = snd_soc_card_get_drvdata(rtd->card);
57
58 priv->sample_rate = params_rate(params);
59 priv->sample_format = params_format(params);
60 60
61 return 0; 61 return 0;
62} 62}
@@ -71,7 +71,7 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
71{ 71{
72 struct snd_soc_pcm_runtime *rtd; 72 struct snd_soc_pcm_runtime *rtd;
73 struct snd_soc_dai *codec_dai; 73 struct snd_soc_dai *codec_dai;
74 struct imx_priv *priv = &card_priv; 74 struct imx_priv *priv = snd_soc_card_get_drvdata(card);
75 struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 75 struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
76 struct device *dev = &priv->pdev->dev; 76 struct device *dev = &priv->pdev->dev;
77 unsigned int pll_out; 77 unsigned int pll_out;
@@ -85,10 +85,10 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
85 switch (level) { 85 switch (level) {
86 case SND_SOC_BIAS_PREPARE: 86 case SND_SOC_BIAS_PREPARE:
87 if (dapm->bias_level == SND_SOC_BIAS_STANDBY) { 87 if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
88 if (sample_format == SNDRV_PCM_FORMAT_S24_LE) 88 if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
89 pll_out = sample_rate * 384; 89 pll_out = priv->sample_rate * 384;
90 else 90 else
91 pll_out = sample_rate * 256; 91 pll_out = priv->sample_rate * 256;
92 92
93 ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, 93 ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL,
94 WM8962_FLL_MCLK, data->clk_frequency, 94 WM8962_FLL_MCLK, data->clk_frequency,
@@ -140,7 +140,7 @@ static int imx_wm8962_late_probe(struct snd_soc_card *card)
140{ 140{
141 struct snd_soc_pcm_runtime *rtd; 141 struct snd_soc_pcm_runtime *rtd;
142 struct snd_soc_dai *codec_dai; 142 struct snd_soc_dai *codec_dai;
143 struct imx_priv *priv = &card_priv; 143 struct imx_priv *priv = snd_soc_card_get_drvdata(card);
144 struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 144 struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
145 struct device *dev = &priv->pdev->dev; 145 struct device *dev = &priv->pdev->dev;
146 int ret; 146 int ret;
@@ -160,13 +160,20 @@ static int imx_wm8962_probe(struct platform_device *pdev)
160 struct device_node *np = pdev->dev.of_node; 160 struct device_node *np = pdev->dev.of_node;
161 struct device_node *ssi_np, *codec_np; 161 struct device_node *ssi_np, *codec_np;
162 struct platform_device *ssi_pdev; 162 struct platform_device *ssi_pdev;
163 struct imx_priv *priv = &card_priv;
164 struct i2c_client *codec_dev; 163 struct i2c_client *codec_dev;
165 struct imx_wm8962_data *data; 164 struct imx_wm8962_data *data;
165 struct imx_priv *priv;
166 struct clk *codec_clk;
166 int int_port, ext_port; 167 int int_port, ext_port;
167 int ret; 168 int ret;
168 169
170 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
171 if (!priv)
172 return -ENOMEM;
173
169 priv->pdev = pdev; 174 priv->pdev = pdev;
175 priv->sample_rate = 44100;
176 priv->sample_format = SNDRV_PCM_FORMAT_S16_LE;
170 177
171 ret = of_property_read_u32(np, "mux-int-port", &int_port); 178 ret = of_property_read_u32(np, "mux-int-port", &int_port);
172 if (ret) { 179 if (ret) {
@@ -231,19 +238,15 @@ static int imx_wm8962_probe(struct platform_device *pdev)
231 goto fail; 238 goto fail;
232 } 239 }
233 240
234 data->codec_clk = devm_clk_get(&codec_dev->dev, NULL); 241 codec_clk = clk_get(&codec_dev->dev, NULL);
235 if (IS_ERR(data->codec_clk)) { 242 if (IS_ERR(codec_clk)) {
236 ret = PTR_ERR(data->codec_clk); 243 ret = PTR_ERR(codec_clk);
237 dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret); 244 dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret);
238 goto fail; 245 goto fail;
239 } 246 }
240 247
241 data->clk_frequency = clk_get_rate(data->codec_clk); 248 data->clk_frequency = clk_get_rate(codec_clk);
242 ret = clk_prepare_enable(data->codec_clk); 249 clk_put(codec_clk);
243 if (ret) {
244 dev_err(&codec_dev->dev, "failed to enable codec clk: %d\n", ret);
245 goto fail;
246 }
247 250
248 data->dai.name = "HiFi"; 251 data->dai.name = "HiFi";
249 data->dai.stream_name = "HiFi"; 252 data->dai.stream_name = "HiFi";
@@ -258,10 +261,10 @@ static int imx_wm8962_probe(struct platform_device *pdev)
258 data->card.dev = &pdev->dev; 261 data->card.dev = &pdev->dev;
259 ret = snd_soc_of_parse_card_name(&data->card, "model"); 262 ret = snd_soc_of_parse_card_name(&data->card, "model");
260 if (ret) 263 if (ret)
261 goto clk_fail; 264 goto fail;
262 ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing"); 265 ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
263 if (ret) 266 if (ret)
264 goto clk_fail; 267 goto fail;
265 data->card.num_links = 1; 268 data->card.num_links = 1;
266 data->card.owner = THIS_MODULE; 269 data->card.owner = THIS_MODULE;
267 data->card.dai_link = &data->dai; 270 data->card.dai_link = &data->dai;
@@ -277,16 +280,9 @@ static int imx_wm8962_probe(struct platform_device *pdev)
277 ret = devm_snd_soc_register_card(&pdev->dev, &data->card); 280 ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
278 if (ret) { 281 if (ret) {
279 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 282 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
280 goto clk_fail; 283 goto fail;
281 } 284 }
282 285
283 of_node_put(ssi_np);
284 of_node_put(codec_np);
285
286 return 0;
287
288clk_fail:
289 clk_disable_unprepare(data->codec_clk);
290fail: 286fail:
291 of_node_put(ssi_np); 287 of_node_put(ssi_np);
292 of_node_put(codec_np); 288 of_node_put(codec_np);
@@ -294,17 +290,6 @@ fail:
294 return ret; 290 return ret;
295} 291}
296 292
297static int imx_wm8962_remove(struct platform_device *pdev)
298{
299 struct snd_soc_card *card = platform_get_drvdata(pdev);
300 struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
301
302 if (!IS_ERR(data->codec_clk))
303 clk_disable_unprepare(data->codec_clk);
304
305 return 0;
306}
307
308static const struct of_device_id imx_wm8962_dt_ids[] = { 293static const struct of_device_id imx_wm8962_dt_ids[] = {
309 { .compatible = "fsl,imx-audio-wm8962", }, 294 { .compatible = "fsl,imx-audio-wm8962", },
310 { /* sentinel */ } 295 { /* sentinel */ }
@@ -318,7 +303,6 @@ static struct platform_driver imx_wm8962_driver = {
318 .of_match_table = imx_wm8962_dt_ids, 303 .of_match_table = imx_wm8962_dt_ids,
319 }, 304 },
320 .probe = imx_wm8962_probe, 305 .probe = imx_wm8962_probe,
321 .remove = imx_wm8962_remove,
322}; 306};
323module_platform_driver(imx_wm8962_driver); 307module_platform_driver(imx_wm8962_driver);
324 308
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index ddf49f30b23f..a639b52c16f6 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -174,7 +174,7 @@ static int mpc8610_hpcd_machine_remove(struct snd_soc_card *card)
174/** 174/**
175 * mpc8610_hpcd_ops: ASoC machine driver operations 175 * mpc8610_hpcd_ops: ASoC machine driver operations
176 */ 176 */
177static struct snd_soc_ops mpc8610_hpcd_ops = { 177static const struct snd_soc_ops mpc8610_hpcd_ops = {
178 .startup = mpc8610_hpcd_startup, 178 .startup = mpc8610_hpcd_startup,
179}; 179};
180 180
diff --git a/sound/soc/fsl/mx27vis-aic32x4.c b/sound/soc/fsl/mx27vis-aic32x4.c
index 198eeb3f3f7a..d7ec3d20065c 100644
--- a/sound/soc/fsl/mx27vis-aic32x4.c
+++ b/sound/soc/fsl/mx27vis-aic32x4.c
@@ -73,7 +73,7 @@ static int mx27vis_aic32x4_hw_params(struct snd_pcm_substream *substream,
73 return 0; 73 return 0;
74} 74}
75 75
76static struct snd_soc_ops mx27vis_aic32x4_snd_ops = { 76static const struct snd_soc_ops mx27vis_aic32x4_snd_ops = {
77 .hw_params = mx27vis_aic32x4_hw_params, 77 .hw_params = mx27vis_aic32x4_hw_params,
78}; 78};
79 79
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index a1f780ecadf5..41c623c55c16 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -184,7 +184,7 @@ static int p1022_ds_machine_remove(struct snd_soc_card *card)
184/** 184/**
185 * p1022_ds_ops: ASoC machine driver operations 185 * p1022_ds_ops: ASoC machine driver operations
186 */ 186 */
187static struct snd_soc_ops p1022_ds_ops = { 187static const struct snd_soc_ops p1022_ds_ops = {
188 .startup = p1022_ds_startup, 188 .startup = p1022_ds_startup,
189}; 189};
190 190
diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c
index d4d88a8cb9c0..4afbdd610bfa 100644
--- a/sound/soc/fsl/p1022_rdk.c
+++ b/sound/soc/fsl/p1022_rdk.c
@@ -188,7 +188,7 @@ static int p1022_rdk_machine_remove(struct snd_soc_card *card)
188/** 188/**
189 * p1022_rdk_ops: ASoC machine driver operations 189 * p1022_rdk_ops: ASoC machine driver operations
190 */ 190 */
191static struct snd_soc_ops p1022_rdk_ops = { 191static const struct snd_soc_ops p1022_rdk_ops = {
192 .startup = p1022_rdk_startup, 192 .startup = p1022_rdk_startup,
193}; 193};
194 194
diff --git a/sound/soc/fsl/phycore-ac97.c b/sound/soc/fsl/phycore-ac97.c
index ae403c29688f..66fb6c4614d2 100644
--- a/sound/soc/fsl/phycore-ac97.c
+++ b/sound/soc/fsl/phycore-ac97.c
@@ -23,7 +23,7 @@
23 23
24static struct snd_soc_card imx_phycore; 24static struct snd_soc_card imx_phycore;
25 25
26static struct snd_soc_ops imx_phycore_hifi_ops = { 26static const struct snd_soc_ops imx_phycore_hifi_ops = {
27}; 27};
28 28
29static struct snd_soc_dai_link imx_phycore_dai_ac97[] = { 29static struct snd_soc_dai_link imx_phycore_dai_ac97[] = {
diff --git a/sound/soc/fsl/wm1133-ev1.c b/sound/soc/fsl/wm1133-ev1.c
index b454972dce35..cdaf16367b47 100644
--- a/sound/soc/fsl/wm1133-ev1.c
+++ b/sound/soc/fsl/wm1133-ev1.c
@@ -139,7 +139,7 @@ static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
139 return 0; 139 return 0;
140} 140}
141 141
142static struct snd_soc_ops wm1133_ev1_ops = { 142static const struct snd_soc_ops wm1133_ev1_ops = {
143 .hw_params = wm1133_ev1_hw_params, 143 .hw_params = wm1133_ev1_hw_params,
144}; 144};
145 145
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 85b4f1806514..2c9dedab5184 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -40,9 +40,10 @@ struct simple_card_data {
40 struct snd_soc_dai_link *dai_link; 40 struct snd_soc_dai_link *dai_link;
41}; 41};
42 42
43#define simple_priv_to_dev(priv) ((priv)->snd_card.dev) 43#define simple_priv_to_card(priv) (&(priv)->snd_card)
44#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + (i))
45#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i)) 44#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
45#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
46#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
46 47
47#define DAI "sound-dai" 48#define DAI "sound-dai"
48#define CELL "#sound-dai-cells" 49#define CELL "#sound-dai-cells"
@@ -323,6 +324,7 @@ static int asoc_simple_card_parse_aux_devs(struct device_node *node,
323{ 324{
324 struct device *dev = simple_priv_to_dev(priv); 325 struct device *dev = simple_priv_to_dev(priv);
325 struct device_node *aux_node; 326 struct device_node *aux_node;
327 struct snd_soc_card *card = simple_priv_to_card(priv);
326 int i, n, len; 328 int i, n, len;
327 329
328 if (!of_find_property(node, PREFIX "aux-devs", &len)) 330 if (!of_find_property(node, PREFIX "aux-devs", &len))
@@ -332,19 +334,19 @@ static int asoc_simple_card_parse_aux_devs(struct device_node *node,
332 if (n <= 0) 334 if (n <= 0)
333 return -EINVAL; 335 return -EINVAL;
334 336
335 priv->snd_card.aux_dev = devm_kzalloc(dev, 337 card->aux_dev = devm_kzalloc(dev,
336 n * sizeof(*priv->snd_card.aux_dev), GFP_KERNEL); 338 n * sizeof(*card->aux_dev), GFP_KERNEL);
337 if (!priv->snd_card.aux_dev) 339 if (!card->aux_dev)
338 return -ENOMEM; 340 return -ENOMEM;
339 341
340 for (i = 0; i < n; i++) { 342 for (i = 0; i < n; i++) {
341 aux_node = of_parse_phandle(node, PREFIX "aux-devs", i); 343 aux_node = of_parse_phandle(node, PREFIX "aux-devs", i);
342 if (!aux_node) 344 if (!aux_node)
343 return -EINVAL; 345 return -EINVAL;
344 priv->snd_card.aux_dev[i].codec_of_node = aux_node; 346 card->aux_dev[i].codec_of_node = aux_node;
345 } 347 }
346 348
347 priv->snd_card.num_aux_devs = n; 349 card->num_aux_devs = n;
348 return 0; 350 return 0;
349} 351}
350 352
@@ -352,6 +354,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
352 struct simple_card_data *priv) 354 struct simple_card_data *priv)
353{ 355{
354 struct device *dev = simple_priv_to_dev(priv); 356 struct device *dev = simple_priv_to_dev(priv);
357 struct snd_soc_card *card = simple_priv_to_card(priv);
355 struct device_node *dai_link; 358 struct device_node *dai_link;
356 int ret; 359 int ret;
357 360
@@ -362,7 +365,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
362 365
363 /* The off-codec widgets */ 366 /* The off-codec widgets */
364 if (of_property_read_bool(node, PREFIX "widgets")) { 367 if (of_property_read_bool(node, PREFIX "widgets")) {
365 ret = snd_soc_of_parse_audio_simple_widgets(&priv->snd_card, 368 ret = snd_soc_of_parse_audio_simple_widgets(card,
366 PREFIX "widgets"); 369 PREFIX "widgets");
367 if (ret) 370 if (ret)
368 goto card_parse_end; 371 goto card_parse_end;
@@ -370,7 +373,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
370 373
371 /* DAPM routes */ 374 /* DAPM routes */
372 if (of_property_read_bool(node, PREFIX "routing")) { 375 if (of_property_read_bool(node, PREFIX "routing")) {
373 ret = snd_soc_of_parse_audio_routing(&priv->snd_card, 376 ret = snd_soc_of_parse_audio_routing(card,
374 PREFIX "routing"); 377 PREFIX "routing");
375 if (ret) 378 if (ret)
376 goto card_parse_end; 379 goto card_parse_end;
@@ -401,7 +404,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
401 goto card_parse_end; 404 goto card_parse_end;
402 } 405 }
403 406
404 ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX); 407 ret = asoc_simple_card_parse_card_name(card, PREFIX);
405 if (ret < 0) 408 if (ret < 0)
406 goto card_parse_end; 409 goto card_parse_end;
407 410
@@ -418,8 +421,9 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
418 struct simple_card_data *priv; 421 struct simple_card_data *priv;
419 struct snd_soc_dai_link *dai_link; 422 struct snd_soc_dai_link *dai_link;
420 struct simple_dai_props *dai_props; 423 struct simple_dai_props *dai_props;
421 struct device_node *np = pdev->dev.of_node;
422 struct device *dev = &pdev->dev; 424 struct device *dev = &pdev->dev;
425 struct device_node *np = dev->of_node;
426 struct snd_soc_card *card;
423 int num, ret; 427 int num, ret;
424 428
425 /* Get the number of DAI links */ 429 /* Get the number of DAI links */
@@ -442,10 +446,11 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
442 priv->dai_link = dai_link; 446 priv->dai_link = dai_link;
443 447
444 /* Init snd_soc_card */ 448 /* Init snd_soc_card */
445 priv->snd_card.owner = THIS_MODULE; 449 card = simple_priv_to_card(priv);
446 priv->snd_card.dev = dev; 450 card->owner = THIS_MODULE;
447 priv->snd_card.dai_link = priv->dai_link; 451 card->dev = dev;
448 priv->snd_card.num_links = num; 452 card->dai_link = priv->dai_link;
453 card->num_links = num;
449 454
450 if (np && of_device_is_available(np)) { 455 if (np && of_device_is_available(np)) {
451 456
@@ -474,7 +479,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
474 return -EINVAL; 479 return -EINVAL;
475 } 480 }
476 481
477 priv->snd_card.name = (cinfo->card) ? cinfo->card : cinfo->name; 482 card->name = (cinfo->card) ? cinfo->card : cinfo->name;
478 dai_link->name = cinfo->name; 483 dai_link->name = cinfo->name;
479 dai_link->stream_name = cinfo->name; 484 dai_link->stream_name = cinfo->name;
480 dai_link->platform_name = cinfo->platform; 485 dai_link->platform_name = cinfo->platform;
@@ -489,13 +494,13 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
489 sizeof(priv->dai_props->codec_dai)); 494 sizeof(priv->dai_props->codec_dai));
490 } 495 }
491 496
492 snd_soc_card_set_drvdata(&priv->snd_card, priv); 497 snd_soc_card_set_drvdata(card, priv);
493 498
494 ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card); 499 ret = devm_snd_soc_register_card(dev, card);
495 if (ret >= 0) 500 if (ret >= 0)
496 return ret; 501 return ret;
497err: 502err:
498 asoc_simple_card_clean_reference(&priv->snd_card); 503 asoc_simple_card_clean_reference(card);
499 504
500 return ret; 505 return ret;
501} 506}
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index 308ff4c11a8d..dcbcab230d1b 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -31,9 +31,10 @@ struct simple_card_data {
31 u32 convert_channels; 31 u32 convert_channels;
32}; 32};
33 33
34#define simple_priv_to_dev(priv) ((priv)->snd_card.dev) 34#define simple_priv_to_card(priv) (&(priv)->snd_card)
35#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + (i))
36#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i)) 35#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
36#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
37#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
37 38
38#define DAI "sound-dai" 39#define DAI "sound-dai"
39#define CELL "#sound-dai-cells" 40#define CELL "#sound-dai-cells"
@@ -109,6 +110,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
109 struct device *dev = simple_priv_to_dev(priv); 110 struct device *dev = simple_priv_to_dev(priv);
110 struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx); 111 struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
111 struct asoc_simple_dai *dai_props = simple_priv_to_props(priv, idx); 112 struct asoc_simple_dai *dai_props = simple_priv_to_props(priv, idx);
113 struct snd_soc_card *card = simple_priv_to_card(priv);
112 int ret; 114 int ret;
113 115
114 if (is_fe) { 116 if (is_fe) {
@@ -163,7 +165,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
163 if (ret < 0) 165 if (ret < 0)
164 return ret; 166 return ret;
165 167
166 snd_soc_of_parse_audio_prefix(&priv->snd_card, 168 snd_soc_of_parse_audio_prefix(card,
167 &priv->codec_conf, 169 &priv->codec_conf,
168 dai_link->codec_of_node, 170 dai_link->codec_of_node,
169 PREFIX "prefix"); 171 PREFIX "prefix");
@@ -201,6 +203,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
201{ 203{
202 struct device *dev = simple_priv_to_dev(priv); 204 struct device *dev = simple_priv_to_dev(priv);
203 struct device_node *np; 205 struct device_node *np;
206 struct snd_soc_card *card = simple_priv_to_card(priv);
204 unsigned int daifmt = 0; 207 unsigned int daifmt = 0;
205 bool is_fe; 208 bool is_fe;
206 int ret, i; 209 int ret, i;
@@ -208,7 +211,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
208 if (!node) 211 if (!node)
209 return -EINVAL; 212 return -EINVAL;
210 213
211 ret = snd_soc_of_parse_audio_routing(&priv->snd_card, PREFIX "routing"); 214 ret = snd_soc_of_parse_audio_routing(card, PREFIX "routing");
212 if (ret < 0) 215 if (ret < 0)
213 return ret; 216 return ret;
214 217
@@ -239,12 +242,12 @@ static int asoc_simple_card_parse_of(struct device_node *node,
239 i++; 242 i++;
240 } 243 }
241 244
242 ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX); 245 ret = asoc_simple_card_parse_card_name(card, PREFIX);
243 if (ret < 0) 246 if (ret < 0)
244 return ret; 247 return ret;
245 248
246 dev_dbg(dev, "New card: %s\n", 249 dev_dbg(dev, "New card: %s\n",
247 priv->snd_card.name ? priv->snd_card.name : ""); 250 card->name ? card->name : "");
248 dev_dbg(dev, "convert_rate %d\n", priv->convert_rate); 251 dev_dbg(dev, "convert_rate %d\n", priv->convert_rate);
249 dev_dbg(dev, "convert_channels %d\n", priv->convert_channels); 252 dev_dbg(dev, "convert_channels %d\n", priv->convert_channels);
250 253
@@ -256,8 +259,9 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
256 struct simple_card_data *priv; 259 struct simple_card_data *priv;
257 struct snd_soc_dai_link *dai_link; 260 struct snd_soc_dai_link *dai_link;
258 struct asoc_simple_dai *dai_props; 261 struct asoc_simple_dai *dai_props;
262 struct snd_soc_card *card;
259 struct device *dev = &pdev->dev; 263 struct device *dev = &pdev->dev;
260 struct device_node *np = pdev->dev.of_node; 264 struct device_node *np = dev->of_node;
261 int num, ret; 265 int num, ret;
262 266
263 /* Allocate the private data */ 267 /* Allocate the private data */
@@ -276,12 +280,13 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
276 priv->dai_link = dai_link; 280 priv->dai_link = dai_link;
277 281
278 /* Init snd_soc_card */ 282 /* Init snd_soc_card */
279 priv->snd_card.owner = THIS_MODULE; 283 card = simple_priv_to_card(priv);
280 priv->snd_card.dev = dev; 284 card->owner = THIS_MODULE;
281 priv->snd_card.dai_link = priv->dai_link; 285 card->dev = dev;
282 priv->snd_card.num_links = num; 286 card->dai_link = priv->dai_link;
283 priv->snd_card.codec_conf = &priv->codec_conf; 287 card->num_links = num;
284 priv->snd_card.num_configs = 1; 288 card->codec_conf = &priv->codec_conf;
289 card->num_configs = 1;
285 290
286 ret = asoc_simple_card_parse_of(np, priv); 291 ret = asoc_simple_card_parse_of(np, priv);
287 if (ret < 0) { 292 if (ret < 0) {
@@ -290,13 +295,13 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
290 goto err; 295 goto err;
291 } 296 }
292 297
293 snd_soc_card_set_drvdata(&priv->snd_card, priv); 298 snd_soc_card_set_drvdata(card, priv);
294 299
295 ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card); 300 ret = devm_snd_soc_register_card(dev, card);
296 if (ret >= 0) 301 if (ret >= 0)
297 return ret; 302 return ret;
298err: 303err:
299 asoc_simple_card_clean_reference(&priv->snd_card); 304 asoc_simple_card_clean_reference(card);
300 305
301 return ret; 306 return ret;
302} 307}
diff --git a/sound/soc/hisilicon/Kconfig b/sound/soc/hisilicon/Kconfig
new file mode 100644
index 000000000000..4356d5a1d338
--- /dev/null
+++ b/sound/soc/hisilicon/Kconfig
@@ -0,0 +1,5 @@
1config SND_I2S_HI6210_I2S
2 tristate "Hisilicon I2S controller"
3 select SND_SOC_GENERIC_DMAENGINE_PCM
	help
	  Select this option to enable the driver for the I2S (Inter-IC
	  Sound) bus controller found on Hisilicon HI6210 based SoCs.
	  If unsure select "N".
diff --git a/sound/soc/hisilicon/Makefile b/sound/soc/hisilicon/Makefile
new file mode 100644
index 000000000000..e8095e2af91a
--- /dev/null
+++ b/sound/soc/hisilicon/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_SND_I2S_HI6210_I2S) += hi6210-i2s.o
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
new file mode 100644
index 000000000000..45163e5202f5
--- /dev/null
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -0,0 +1,618 @@
1/*
2 * linux/sound/soc/m8m/hi6210_i2s.c - I2S IP driver
3 *
4 * Copyright (C) 2015 Linaro, Ltd
5 * Author: Andy Green <andy.green@linaro.org>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * This driver only deals with S2 interface (BT)
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
24#include <linux/jiffies.h>
25#include <linux/io.h>
26#include <linux/gpio.h>
27#include <sound/core.h>
28#include <sound/pcm.h>
29#include <sound/pcm_params.h>
30#include <sound/dmaengine_pcm.h>
31#include <sound/initval.h>
32#include <sound/soc.h>
33#include <linux/interrupt.h>
34#include <linux/reset.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/mfd/syscon.h>
38#include <linux/reset-controller.h>
39#include <linux/clk.h>
40
41#include "hi6210-i2s.h"
42
/*
 * Per-instance state for one HI6210 I2S controller.
 * Allocated in probe and stored as driver data on the platform device.
 */
struct hi6210_i2s {
	struct device *dev;		/* parent platform device */
	struct reset_control *rc;	/* reset line — not used in this file; confirm */
	struct clk *clk[8];		/* only 2 slots filled by probe: dacodec, i2s-base */
	int clocks;			/* number of valid entries in clk[] */
	struct snd_soc_dai_driver dai;	/* per-instance copy of hi6210_i2s_dai_init */
	void __iomem *base;		/* mapped controller registers */
	struct regmap *sysctrl;		/* "hisilicon,sysctrl-syscon" syscon regmap */
	phys_addr_t base_phys;		/* physical base; used to compute DMA FIFO addresses */
	struct snd_dmaengine_dai_dma_data dma_data[2];	/* playback/capture DMA config */
	int clk_rate;			/* not written in this file — confirm use */
	spinlock_t lock;		/* serialises RMW of HII2S_I2S_CFG in rx/txctrl */
	int rate;			/* current sample rate (set in hw_params) */
	int format;			/* cached DAI format flags (set_fmt) */
	u8 bits;			/* sample container width in bits (16 or 32) */
	u8 channels;			/* channel count from hw_params */
	u8 id;				/* not written in this file — confirm use */
	u8 channel_length;		/* channels * bits */
	u8 use;				/* not written in this file — confirm use */
	u32 master:1;			/* 1 = CBS_CFS, controller supplies the clocks */
	u32 status:1;			/* not written in this file — confirm use */
};
65
/*
 * Register offsets in the system controller (syscon) block, accessed
 * through i2s->sysctrl.  CLKEN/CLKDIS are write-to-enable/-disable pairs;
 * RSTEN/RSTDIS/RSTSTAT assert, deassert and report resets.
 */
#define SC_PERIPH_CLKEN1	0x210
#define SC_PERIPH_CLKDIS1	0x214

#define SC_PERIPH_CLKEN3	0x230
#define SC_PERIPH_CLKDIS3	0x234

#define SC_PERIPH_CLKEN12	0x270
#define SC_PERIPH_CLKDIS12	0x274

#define SC_PERIPH_RSTEN1	0x310
#define SC_PERIPH_RSTDIS1	0x314
#define SC_PERIPH_RSTSTAT1	0x318

#define SC_PERIPH_RSTEN2	0x320
#define SC_PERIPH_RSTDIS2	0x324
#define SC_PERIPH_RSTSTAT2	0x328

#define SOC_PMCTRL_BBPPLLALIAS	0x48

/* Indices into hi6210_i2s.clk[], filled in probe in this order. */
enum {
	CLK_DACODEC,	/* "dacodec" clock */
	CLK_I2S_BASE,	/* "i2s-base" clock, rate set to 49.152 MHz in startup */
};
89
90static inline void hi6210_write_reg(struct hi6210_i2s *i2s, int reg, u32 val)
91{
92 writel(val, i2s->base + reg);
93}
94
95static inline u32 hi6210_read_reg(struct hi6210_i2s *i2s, int reg)
96{
97 return readl(i2s->base + reg);
98}
99
100int hi6210_i2s_startup(struct snd_pcm_substream *substream,
101 struct snd_soc_dai *cpu_dai)
102{
103 struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
104 int ret, n;
105 u32 val;
106
107 /* deassert reset on ABB */
108 regmap_read(i2s->sysctrl, SC_PERIPH_RSTSTAT2, &val);
109 if (val & BIT(4))
110 regmap_write(i2s->sysctrl, SC_PERIPH_RSTDIS2, BIT(4));
111
112 for (n = 0; n < i2s->clocks; n++) {
113 ret = clk_prepare_enable(i2s->clk[n]);
114 if (ret) {
115 while (n--)
116 clk_disable_unprepare(i2s->clk[n]);
117 return ret;
118 }
119 }
120
121 ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
122 if (ret) {
123 dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
124 __func__, ret);
125 return ret;
126 }
127
128 /* enable clock before frequency division */
129 regmap_write(i2s->sysctrl, SC_PERIPH_CLKEN12, BIT(9));
130
131 /* enable codec working clock / == "codec bus clock" */
132 regmap_write(i2s->sysctrl, SC_PERIPH_CLKEN1, BIT(5));
133
134 /* deassert reset on codec / interface clock / working clock */
135 regmap_write(i2s->sysctrl, SC_PERIPH_RSTEN1, BIT(5));
136 regmap_write(i2s->sysctrl, SC_PERIPH_RSTDIS1, BIT(5));
137
138 /* not interested in i2s irqs */
139 val = hi6210_read_reg(i2s, HII2S_CODEC_IRQ_MASK);
140 val |= 0x3f;
141 hi6210_write_reg(i2s, HII2S_CODEC_IRQ_MASK, val);
142
143
144 /* reset the stereo downlink fifo */
145 val = hi6210_read_reg(i2s, HII2S_APB_AFIFO_CFG_1);
146 val |= (BIT(5) | BIT(4));
147 hi6210_write_reg(i2s, HII2S_APB_AFIFO_CFG_1, val);
148
149 val = hi6210_read_reg(i2s, HII2S_APB_AFIFO_CFG_1);
150 val &= ~(BIT(5) | BIT(4));
151 hi6210_write_reg(i2s, HII2S_APB_AFIFO_CFG_1, val);
152
153
154 val = hi6210_read_reg(i2s, HII2S_SW_RST_N);
155 val &= ~(HII2S_SW_RST_N__ST_DL_WORDLEN_MASK <<
156 HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT);
157 val |= (HII2S_BITS_16 << HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT);
158 hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
159
160 val = hi6210_read_reg(i2s, HII2S_MISC_CFG);
161 /* mux 11/12 = APB not i2s */
162 val &= ~HII2S_MISC_CFG__ST_DL_TEST_SEL;
163 /* BT R ch 0 = mixer op of DACR ch */
164 val &= ~HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL;
165 val &= ~HII2S_MISC_CFG__S2_DOUT_TEST_SEL;
166
167 val |= HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL;
168 /* BT L ch = 1 = mux 7 = "mixer output of DACL */
169 val |= HII2S_MISC_CFG__S2_DOUT_TEST_SEL;
170 hi6210_write_reg(i2s, HII2S_MISC_CFG, val);
171
172 val = hi6210_read_reg(i2s, HII2S_SW_RST_N);
173 val |= HII2S_SW_RST_N__SW_RST_N;
174 hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
175
176 return 0;
177}
178void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
179 struct snd_soc_dai *cpu_dai)
180{
181 struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
182 int n;
183
184 for (n = 0; n < i2s->clocks; n++)
185 clk_disable_unprepare(i2s->clk[n]);
186
187 regmap_write(i2s->sysctrl, SC_PERIPH_RSTEN1, BIT(5));
188}
189
190static void hi6210_i2s_txctrl(struct snd_soc_dai *cpu_dai, int on)
191{
192 struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
193 u32 val;
194
195 spin_lock(&i2s->lock);
196 if (on) {
197 /* enable S2 TX */
198 val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
199 val |= HII2S_I2S_CFG__S2_IF_TX_EN;
200 hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
201 } else {
202 /* disable S2 TX */
203 val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
204 val &= ~HII2S_I2S_CFG__S2_IF_TX_EN;
205 hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
206 }
207 spin_unlock(&i2s->lock);
208}
209
210static void hi6210_i2s_rxctrl(struct snd_soc_dai *cpu_dai, int on)
211{
212 struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
213 u32 val;
214
215 spin_lock(&i2s->lock);
216 if (on) {
217 val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
218 val |= HII2S_I2S_CFG__S2_IF_RX_EN;
219 hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
220 } else {
221 val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
222 val &= ~HII2S_I2S_CFG__S2_IF_RX_EN;
223 hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
224 }
225 spin_unlock(&i2s->lock);
226}
227
228static int hi6210_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
229{
230 struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
231
232 /*
233 * We don't actually set the hardware until the hw_params
234 * call, but we need to validate the user input here.
235 */
236 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
237 case SND_SOC_DAIFMT_CBM_CFM:
238 case SND_SOC_DAIFMT_CBS_CFS:
239 break;
240 default:
241 return -EINVAL;
242 }
243
244 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
245 case SND_SOC_DAIFMT_I2S:
246 case SND_SOC_DAIFMT_LEFT_J:
247 case SND_SOC_DAIFMT_RIGHT_J:
248 break;
249 default:
250 return -EINVAL;
251 }
252
253 i2s->format = fmt;
254 i2s->master = (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) ==
255 SND_SOC_DAIFMT_CBS_CFS;
256
257 return 0;
258}
259
/*
 * hi6210_i2s_hw_params() - program the S2 (BT) interface for a stream.
 *
 * Translates the requested PCM format/rate/channels into the controller's
 * register encodings, configures the downlink FIFO thresholds, clock gates,
 * mixers, master/slave mode, framing and DMA parameters.  The register
 * writes below are order-dependent; do not reorder.
 *
 * Returns 0 on success or -EINVAL for an unsupported format, rate or
 * channel count.
 */
static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *cpu_dai)
{
	struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
	u32 bits = 0, rate = 0, signed_data = 0, fmt = 0;
	u32 val;
	struct snd_dmaengine_dai_dma_data *dma_data;

	/* unsigned formats set the CODEC_DATA_FORMAT flag, then share the
	 * word-length selection with their signed counterpart */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_U16_LE:
		signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
		/* fallthru */
	case SNDRV_PCM_FORMAT_S16_LE:
		bits = HII2S_BITS_16;
		break;
	case SNDRV_PCM_FORMAT_U24_LE:
		signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
		/* fallthru */
	case SNDRV_PCM_FORMAT_S24_LE:
		bits = HII2S_BITS_24;
		break;
	default:
		dev_err(cpu_dai->dev, "Bad format\n");
		return -EINVAL;
	}

	/* map the PCM rate to the FS_CFG field encoding */
	switch (params_rate(params)) {
	case 8000:
		rate = HII2S_FS_RATE_8KHZ;
		break;
	case 16000:
		rate = HII2S_FS_RATE_16KHZ;
		break;
	case 32000:
		rate = HII2S_FS_RATE_32KHZ;
		break;
	case 48000:
		rate = HII2S_FS_RATE_48KHZ;
		break;
	case 96000:
		rate = HII2S_FS_RATE_96KHZ;
		break;
	case 192000:
		rate = HII2S_FS_RATE_192KHZ;
		break;
	default:
		dev_err(cpu_dai->dev, "Bad rate: %d\n", params_rate(params));
		return -EINVAL;
	}

	if (!(params_channels(params))) {
		dev_err(cpu_dai->dev, "Bad channels\n");
		return -EINVAL;
	}

	dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);

	/* 24-bit samples travel in 32-bit containers (3-byte DMA width) */
	switch (bits) {
	case HII2S_BITS_24:
		i2s->bits = 32;
		dma_data->addr_width = 3;
		break;
	default:
		i2s->bits = 16;
		dma_data->addr_width = 2;
		break;
	}
	i2s->rate = params_rate(params);
	i2s->channels = params_channels(params);
	i2s->channel_length = i2s->channels * i2s->bits;

	/* stereo downlink FIFO thresholds: almost-empty 16, almost-full 30 */
	val = hi6210_read_reg(i2s, HII2S_ST_DL_FIFO_TH_CFG);
	val &= ~((HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_MASK <<
		HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT) |
		(HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_MASK <<
		HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT) |
		(HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_MASK <<
		HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT) |
		(HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_MASK <<
		HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT));
	val |= ((16 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT) |
		(30 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT) |
		(16 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT) |
		(30 << HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT));
	hi6210_write_reg(i2s, HII2S_ST_DL_FIFO_TH_CFG, val);

	/* ungate the S2 interface, mixer, SRC and both downlink channels
	 * (bits 19:17 are set too — undocumented here; confirm vs datasheet) */
	val = hi6210_read_reg(i2s, HII2S_IF_CLK_EN_CFG);
	val |= (BIT(19) | BIT(18) | BIT(17) |
		HII2S_IF_CLK_EN_CFG__S2_IF_CLK_EN |
		HII2S_IF_CLK_EN_CFG__S2_OL_MIXER_EN |
		HII2S_IF_CLK_EN_CFG__S2_OL_SRC_EN |
		HII2S_IF_CLK_EN_CFG__ST_DL_R_EN |
		HII2S_IF_CLK_EN_CFG__ST_DL_L_EN);
	hi6210_write_reg(i2s, HII2S_IF_CLK_EN_CFG, val);

	/* DAC path: disable SDM/HBF2I/AGC filters, keep only the mixers */
	val = hi6210_read_reg(i2s, HII2S_DIG_FILTER_CLK_EN_CFG);
	val &= ~(HII2S_DIG_FILTER_CLK_EN_CFG__DACR_SDM_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACR_HBF2I_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACR_AGC_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACL_SDM_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACL_HBF2I_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACL_AGC_EN);
	val |= (HII2S_DIG_FILTER_CLK_EN_CFG__DACR_MIXER_EN |
		HII2S_DIG_FILTER_CLK_EN_CFG__DACL_MIXER_EN);
	hi6210_write_reg(i2s, HII2S_DIG_FILTER_CLK_EN_CFG, val);

	/* unmute mixer input 2 on both DAC channels */
	val = hi6210_read_reg(i2s, HII2S_DIG_FILTER_MODULE_CFG);
	val &= ~(HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN2_MUTE |
		HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN2_MUTE);
	hi6210_write_reg(i2s, HII2S_DIG_FILTER_MODULE_CFG, val);

	/* unmute the S2 output and voice-downlink mixer inputs */
	val = hi6210_read_reg(i2s, HII2S_MUX_TOP_MODULE_CFG);
	val &= ~(HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN1_MUTE |
		HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN2_MUTE |
		HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN1_MUTE |
		HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN2_MUTE);
	hi6210_write_reg(i2s, HII2S_MUX_TOP_MODULE_CFG, val);

	/* master/slave as validated and cached by set_fmt */
	switch (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		i2s->master = false;
		val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
		val |= HII2S_I2S_CFG__S2_MST_SLV;
		hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		i2s->master = true;
		val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
		val &= ~HII2S_I2S_CFG__S2_MST_SLV;
		hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
		break;
	default:
		WARN_ONCE(1, "Invalid i2s->fmt MASTER_MASK. This shouldn't happen\n");
		return -EINVAL;
	}

	switch (i2s->format & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fmt = HII2S_FORMAT_I2S;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fmt = HII2S_FORMAT_LEFT_JUST;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		fmt = HII2S_FORMAT_RIGHT_JUST;
		break;
	default:
		WARN_ONCE(1, "Invalid i2s->fmt FORMAT_MASK. This shouldn't happen\n");
		return -EINVAL;
	}

	/* program the S2 framing mode */
	val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
	val &= ~(HII2S_I2S_CFG__S2_FUNC_MODE_MASK <<
		HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT);
	val |= fmt << HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT;
	hi6210_write_reg(i2s, HII2S_I2S_CFG, val);

	/* select internal clock source and route the I2S to BT */
	val = hi6210_read_reg(i2s, HII2S_CLK_SEL);
	val &= ~(HII2S_CLK_SEL__I2S_BT_FM_SEL | /* BT gets the I2S */
		HII2S_CLK_SEL__EXT_12_288MHZ_SEL);
	hi6210_write_reg(i2s, HII2S_CLK_SEL, val);

	dma_data->maxburst = 2;

	/* DMA targets the downlink FIFO for playback, uplink for capture */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dma_data->addr = i2s->base_phys + HII2S_ST_DL_CHANNEL;
	else
		dma_data->addr = i2s->base_phys + HII2S_STEREO_UPLINK_CHANNEL;

	/* mono uses FRAME_MODE, anything else clears it */
	switch (i2s->channels) {
	case 1:
		val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
		val |= HII2S_I2S_CFG__S2_FRAME_MODE;
		hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
		break;
	default:
		val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
		val &= ~HII2S_I2S_CFG__S2_FRAME_MODE;
		hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
		break;
	}

	/* clear loopback, set signed type and word length */
	val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
	val &= ~HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
	val &= ~(HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_MASK <<
		HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT);
	val &= ~(HII2S_I2S_CFG__S2_DIRECT_LOOP_MASK <<
		HII2S_I2S_CFG__S2_DIRECT_LOOP_SHIFT);
	val |= signed_data;
	val |= (bits << HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT);
	hi6210_write_reg(i2s, HII2S_I2S_CFG, val);

	/* in slave mode the sample clocks come from outside — done */
	if (!i2s->master)
		return 0;

	/* set DAC and related units to correct rate */
	val = hi6210_read_reg(i2s, HII2S_FS_CFG);
	val &= ~(HII2S_FS_CFG__FS_S2_MASK << HII2S_FS_CFG__FS_S2_SHIFT);
	val &= ~(HII2S_FS_CFG__FS_DACLR_MASK << HII2S_FS_CFG__FS_DACLR_SHIFT);
	val &= ~(HII2S_FS_CFG__FS_ST_DL_R_MASK <<
		HII2S_FS_CFG__FS_ST_DL_R_SHIFT);
	val &= ~(HII2S_FS_CFG__FS_ST_DL_L_MASK <<
		HII2S_FS_CFG__FS_ST_DL_L_SHIFT);
	val |= (rate << HII2S_FS_CFG__FS_S2_SHIFT);
	val |= (rate << HII2S_FS_CFG__FS_DACLR_SHIFT);
	val |= (rate << HII2S_FS_CFG__FS_ST_DL_R_SHIFT);
	val |= (rate << HII2S_FS_CFG__FS_ST_DL_L_SHIFT);
	hi6210_write_reg(i2s, HII2S_FS_CFG, val);

	return 0;
}
480
481static int hi6210_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
482 struct snd_soc_dai *cpu_dai)
483{
484 pr_debug("%s\n", __func__);
485 switch (cmd) {
486 case SNDRV_PCM_TRIGGER_START:
487 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
488 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
489 hi6210_i2s_rxctrl(cpu_dai, 1);
490 else
491 hi6210_i2s_txctrl(cpu_dai, 1);
492 break;
493 case SNDRV_PCM_TRIGGER_STOP:
494 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
495 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
496 hi6210_i2s_rxctrl(cpu_dai, 0);
497 else
498 hi6210_i2s_txctrl(cpu_dai, 0);
499 break;
500 default:
501 dev_err(cpu_dai->dev, "uknown cmd\n");
502 return -EINVAL;
503 }
504 return 0;
505}
506
507static int hi6210_i2s_dai_probe(struct snd_soc_dai *dai)
508{
509 struct hi6210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
510
511 snd_soc_dai_init_dma_data(dai,
512 &i2s->dma_data[SNDRV_PCM_STREAM_PLAYBACK],
513 &i2s->dma_data[SNDRV_PCM_STREAM_CAPTURE]);
514
515 return 0;
516}
517
518
519static struct snd_soc_dai_ops hi6210_i2s_dai_ops = {
520 .trigger = hi6210_i2s_trigger,
521 .hw_params = hi6210_i2s_hw_params,
522 .set_fmt = hi6210_i2s_set_fmt,
523 .startup = hi6210_i2s_startup,
524 .shutdown = hi6210_i2s_shutdown,
525};
526
527struct snd_soc_dai_driver hi6210_i2s_dai_init = {
528 .probe = hi6210_i2s_dai_probe,
529 .playback = {
530 .channels_min = 2,
531 .channels_max = 2,
532 .formats = SNDRV_PCM_FMTBIT_S16_LE |
533 SNDRV_PCM_FMTBIT_U16_LE,
534 .rates = SNDRV_PCM_RATE_48000,
535 },
536 .capture = {
537 .channels_min = 2,
538 .channels_max = 2,
539 .formats = SNDRV_PCM_FMTBIT_S16_LE |
540 SNDRV_PCM_FMTBIT_U16_LE,
541 .rates = SNDRV_PCM_RATE_48000,
542 },
543 .ops = &hi6210_i2s_dai_ops,
544};
545
/* ASoC component descriptor; registered together with the DAI in probe. */
static const struct snd_soc_component_driver hi6210_i2s_i2s_comp = {
	.name = "hi6210_i2s-i2s",
};
549
550static int hi6210_i2s_probe(struct platform_device *pdev)
551{
552 struct device_node *node = pdev->dev.of_node;
553 struct device *dev = &pdev->dev;
554 struct hi6210_i2s *i2s;
555 struct resource *res;
556 int ret;
557
558 i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
559 if (!i2s)
560 return -ENOMEM;
561
562 i2s->dev = dev;
563 spin_lock_init(&i2s->lock);
564
565 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
566 i2s->base = devm_ioremap_resource(dev, res);
567 if (IS_ERR(i2s->base))
568 return PTR_ERR(i2s->base);
569
570 i2s->base_phys = (phys_addr_t)res->start;
571 i2s->dai = hi6210_i2s_dai_init;
572
573 dev_set_drvdata(&pdev->dev, i2s);
574
575 i2s->sysctrl = syscon_regmap_lookup_by_phandle(node,
576 "hisilicon,sysctrl-syscon");
577 if (IS_ERR(i2s->sysctrl))
578 return PTR_ERR(i2s->sysctrl);
579
580 i2s->clk[CLK_DACODEC] = devm_clk_get(&pdev->dev, "dacodec");
581 if (IS_ERR_OR_NULL(i2s->clk[CLK_DACODEC]))
582 return PTR_ERR(i2s->clk[CLK_DACODEC]);
583 i2s->clocks++;
584
585 i2s->clk[CLK_I2S_BASE] = devm_clk_get(&pdev->dev, "i2s-base");
586 if (IS_ERR_OR_NULL(i2s->clk[CLK_I2S_BASE]))
587 return PTR_ERR(i2s->clk[CLK_I2S_BASE]);
588 i2s->clocks++;
589
590 ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
591 if (ret)
592 return ret;
593
594 ret = devm_snd_soc_register_component(&pdev->dev, &hi6210_i2s_i2s_comp,
595 &i2s->dai, 1);
596 return ret;
597}
598
/* Devicetree match table; exported for module autoloading. */
static const struct of_device_id hi6210_i2s_dt_ids[] = {
	{ .compatible = "hisilicon,hi6210-i2s" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, hi6210_i2s_dt_ids);
605
/* Platform driver glue; probe does everything, devm handles teardown. */
static struct platform_driver hi6210_i2s_driver = {
	.probe = hi6210_i2s_probe,
	.driver = {
		.name = "hi6210_i2s",
		.of_match_table = hi6210_i2s_dt_ids,
	},
};

module_platform_driver(hi6210_i2s_driver);

MODULE_DESCRIPTION("Hisilicon HI6210 I2S driver");
MODULE_AUTHOR("Andy Green <andy.green@linaro.org>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/hisilicon/hi6210-i2s.h b/sound/soc/hisilicon/hi6210-i2s.h
new file mode 100644
index 000000000000..85cecc4939a0
--- /dev/null
+++ b/sound/soc/hisilicon/hi6210-i2s.h
@@ -0,0 +1,276 @@
1/*
2 * linux/sound/soc/hisilicon/hi6210-i2s.h
3 *
4 * Copyright (C) 2015 Linaro, Ltd
5 * Author: Andy Green <andy.green@linaro.org>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Note at least on 6220, S2 == BT, S1 == Digital FM Radio IF
20 */
21
22#ifndef _HI6210_I2S_H
23#define _HI6210_I2S_H
24
25#define HII2S_SW_RST_N 0
26
27#define HII2S_SW_RST_N__STEREO_UPLINK_WORDLEN_SHIFT 28
28#define HII2S_SW_RST_N__STEREO_UPLINK_WORDLEN_MASK 3
29#define HII2S_SW_RST_N__THIRDMD_UPLINK_WORDLEN_SHIFT 26
30#define HII2S_SW_RST_N__THIRDMD_UPLINK_WORDLEN_MASK 3
31#define HII2S_SW_RST_N__VOICE_UPLINK_WORDLEN_SHIFT 24
32#define HII2S_SW_RST_N__VOICE_UPLINK_WORDLEN_MASK 3
33#define HII2S_SW_RST_N__ST_DL_WORDLEN_SHIFT 20
34#define HII2S_SW_RST_N__ST_DL_WORDLEN_MASK 3
35#define HII2S_SW_RST_N__THIRDMD_DLINK_WORDLEN_SHIFT 18
36#define HII2S_SW_RST_N__THIRDMD_DLINK_WORDLEN_MASK 3
37#define HII2S_SW_RST_N__VOICE_DLINK_WORDLEN_SHIFT 16
38#define HII2S_SW_RST_N__VOICE_DLINK_WORDLEN_MASK 3
39
40#define HII2S_SW_RST_N__SW_RST_N BIT(0)
41
/* Encodings for the 2-bit *_WORDLEN / IO_WORDLENGTH register fields. */
enum hi6210_bits {
	HII2S_BITS_16,
	HII2S_BITS_18,
	HII2S_BITS_20,
	HII2S_BITS_24,
};
48
49
50#define HII2S_IF_CLK_EN_CFG 4
51
52#define HII2S_IF_CLK_EN_CFG__THIRDMD_UPLINK_EN BIT(25)
53#define HII2S_IF_CLK_EN_CFG__THIRDMD_DLINK_EN BIT(24)
54#define HII2S_IF_CLK_EN_CFG__S3_IF_CLK_EN BIT(20)
55#define HII2S_IF_CLK_EN_CFG__S2_IF_CLK_EN BIT(16)
56#define HII2S_IF_CLK_EN_CFG__S2_OL_MIXER_EN BIT(15)
57#define HII2S_IF_CLK_EN_CFG__S2_OL_SRC_EN BIT(14)
58#define HII2S_IF_CLK_EN_CFG__S2_IR_PGA_EN BIT(13)
59#define HII2S_IF_CLK_EN_CFG__S2_IL_PGA_EN BIT(12)
60#define HII2S_IF_CLK_EN_CFG__S1_IR_PGA_EN BIT(10)
61#define HII2S_IF_CLK_EN_CFG__S1_IL_PGA_EN BIT(9)
62#define HII2S_IF_CLK_EN_CFG__S1_IF_CLK_EN BIT(8)
63#define HII2S_IF_CLK_EN_CFG__VOICE_DLINK_SRC_EN BIT(7)
64#define HII2S_IF_CLK_EN_CFG__VOICE_DLINK_EN BIT(6)
65#define HII2S_IF_CLK_EN_CFG__ST_DL_R_EN BIT(5)
66#define HII2S_IF_CLK_EN_CFG__ST_DL_L_EN BIT(4)
67#define HII2S_IF_CLK_EN_CFG__VOICE_UPLINK_R_EN BIT(3)
68#define HII2S_IF_CLK_EN_CFG__VOICE_UPLINK_L_EN BIT(2)
69#define HII2S_IF_CLK_EN_CFG__STEREO_UPLINK_R_EN BIT(1)
70#define HII2S_IF_CLK_EN_CFG__STEREO_UPLINK_L_EN BIT(0)
71
72#define HII2S_DIG_FILTER_CLK_EN_CFG 8
73#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_SDM_EN BIT(30)
74#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_HBF2I_EN BIT(28)
75#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_MIXER_EN BIT(25)
76#define HII2S_DIG_FILTER_CLK_EN_CFG__DACR_AGC_EN BIT(24)
77#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_SDM_EN BIT(22)
78#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_HBF2I_EN BIT(20)
79#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_MIXER_EN BIT(17)
80#define HII2S_DIG_FILTER_CLK_EN_CFG__DACL_AGC_EN BIT(16)
81
82#define HII2S_FS_CFG 0xc
83
84#define HII2S_FS_CFG__FS_S2_SHIFT 28
85#define HII2S_FS_CFG__FS_S2_MASK 7
86#define HII2S_FS_CFG__FS_S1_SHIFT 24
87#define HII2S_FS_CFG__FS_S1_MASK 7
88#define HII2S_FS_CFG__FS_ADCLR_SHIFT 20
89#define HII2S_FS_CFG__FS_ADCLR_MASK 7
90#define HII2S_FS_CFG__FS_DACLR_SHIFT 16
91#define HII2S_FS_CFG__FS_DACLR_MASK 7
92#define HII2S_FS_CFG__FS_ST_DL_R_SHIFT 8
93#define HII2S_FS_CFG__FS_ST_DL_R_MASK 7
94#define HII2S_FS_CFG__FS_ST_DL_L_SHIFT 4
95#define HII2S_FS_CFG__FS_ST_DL_L_MASK 7
96#define HII2S_FS_CFG__FS_VOICE_DLINK_SHIFT 0
97#define HII2S_FS_CFG__FS_VOICE_DLINK_MASK 7
98
/*
 * Encodings for the 3-bit FS_* fields of HII2S_FS_CFG.
 * NOTE(review): value 3 is skipped — presumably reserved by the hardware;
 * confirm against the datasheet.
 */
enum hi6210_i2s_rates {
	HII2S_FS_RATE_8KHZ = 0,
	HII2S_FS_RATE_16KHZ = 1,
	HII2S_FS_RATE_32KHZ = 2,
	HII2S_FS_RATE_48KHZ = 4,
	HII2S_FS_RATE_96KHZ = 5,
	HII2S_FS_RATE_192KHZ = 6,
};
107
108#define HII2S_I2S_CFG 0x10
109
110#define HII2S_I2S_CFG__S2_IF_TX_EN BIT(31)
111#define HII2S_I2S_CFG__S2_IF_RX_EN BIT(30)
112#define HII2S_I2S_CFG__S2_FRAME_MODE BIT(29)
113#define HII2S_I2S_CFG__S2_MST_SLV BIT(28)
114#define HII2S_I2S_CFG__S2_LRCK_MODE BIT(27)
115#define HII2S_I2S_CFG__S2_CHNNL_MODE BIT(26)
116#define HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_SHIFT 24
117#define HII2S_I2S_CFG__S2_CODEC_IO_WORDLENGTH_MASK 3
118#define HII2S_I2S_CFG__S2_DIRECT_LOOP_SHIFT 22
119#define HII2S_I2S_CFG__S2_DIRECT_LOOP_MASK 3
120#define HII2S_I2S_CFG__S2_TX_CLK_SEL BIT(21)
121#define HII2S_I2S_CFG__S2_RX_CLK_SEL BIT(20)
122#define HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT BIT(19)
123#define HII2S_I2S_CFG__S2_FUNC_MODE_SHIFT 16
124#define HII2S_I2S_CFG__S2_FUNC_MODE_MASK 7
125#define HII2S_I2S_CFG__S1_IF_TX_EN BIT(15)
126#define HII2S_I2S_CFG__S1_IF_RX_EN BIT(14)
127#define HII2S_I2S_CFG__S1_FRAME_MODE BIT(13)
128#define HII2S_I2S_CFG__S1_MST_SLV BIT(12)
129#define HII2S_I2S_CFG__S1_LRCK_MODE BIT(11)
130#define HII2S_I2S_CFG__S1_CHNNL_MODE BIT(10)
131#define HII2S_I2S_CFG__S1_CODEC_IO_WORDLENGTH_SHIFT 8
132#define HII2S_I2S_CFG__S1_CODEC_IO_WORDLENGTH_MASK 3
133#define HII2S_I2S_CFG__S1_DIRECT_LOOP_SHIFT 6
134#define HII2S_I2S_CFG__S1_DIRECT_LOOP_MASK 3
135#define HII2S_I2S_CFG__S1_TX_CLK_SEL BIT(5)
136#define HII2S_I2S_CFG__S1_RX_CLK_SEL BIT(4)
137#define HII2S_I2S_CFG__S1_CODEC_DATA_FORMAT BIT(3)
138#define HII2S_I2S_CFG__S1_FUNC_MODE_SHIFT 0
139#define HII2S_I2S_CFG__S1_FUNC_MODE_MASK 7
140
/* Encodings for the 3-bit S1/S2 FUNC_MODE fields of HII2S_I2S_CFG. */
enum hi6210_i2s_formats {
	HII2S_FORMAT_I2S,
	HII2S_FORMAT_PCM_STD,
	HII2S_FORMAT_PCM_USER,
	HII2S_FORMAT_LEFT_JUST,
	HII2S_FORMAT_RIGHT_JUST,
};
148
149#define HII2S_DIG_FILTER_MODULE_CFG 0x14
150
151#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_GAIN_SHIFT 28
152#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_GAIN_MASK 3
153#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN4_MUTE BIT(27)
154#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN3_MUTE BIT(26)
155#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN2_MUTE BIT(25)
156#define HII2S_DIG_FILTER_MODULE_CFG__DACR_MIXER_IN1_MUTE BIT(24)
157#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_GAIN_SHIFT 20
158#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_GAIN_MASK 3
159#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN4_MUTE BIT(19)
160#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN3_MUTE BIT(18)
161#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN2_MUTE BIT(17)
162#define HII2S_DIG_FILTER_MODULE_CFG__DACL_MIXER_IN1_MUTE BIT(16)
163#define HII2S_DIG_FILTER_MODULE_CFG__SW_DACR_SDM_DITHER BIT(9)
164#define HII2S_DIG_FILTER_MODULE_CFG__SW_DACL_SDM_DITHER BIT(8)
165#define HII2S_DIG_FILTER_MODULE_CFG__LM_CODEC_DAC2ADC_SHIFT 4
166#define HII2S_DIG_FILTER_MODULE_CFG__LM_CODEC_DAC2ADC_MASK 7
167#define HII2S_DIG_FILTER_MODULE_CFG__RM_CODEC_DAC2ADC_SHIFT 0
168#define HII2S_DIG_FILTER_MODULE_CFG__RM_CODEC_DAC2ADC_MASK 7
169
/* Encodings for the 2-bit *_MIXER_GAIN fields: 100%, 50%, 25%. */
enum hi6210_gains {
	HII2S_GAIN_100PC,
	HII2S_GAIN_50PC,
	HII2S_GAIN_25PC,
};
175
176#define HII2S_MUX_TOP_MODULE_CFG 0x18
177
178#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_GAIN_SHIFT 14
179#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_GAIN_MASK 3
180#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN2_MUTE BIT(13)
181#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_MIXER_IN1_MUTE BIT(12)
182#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_GAIN_SHIFT 10
183#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_GAIN_MASK 3
184#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN2_MUTE BIT(9)
185#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_MIXER_IN1_MUTE BIT(8)
186#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_RDY BIT(6)
187#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_MODE_SHIFT 4
188#define HII2S_MUX_TOP_MODULE_CFG__S2_OL_SRC_MODE_MASK 3
189#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_RDY BIT(3)
190#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_MODE_SHIFT 0
191#define HII2S_MUX_TOP_MODULE_CFG__VOICE_DLINK_SRC_MODE_MASK 7
192
/*
 * Encodings for the S2_OL_SRC_MODE field.  The numeric suffixes look like
 * conversion ratios (x3/x12/x6/x2) — confirm against the datasheet.
 */
enum hi6210_s2_src_mode {
	HII2S_S2_SRC_MODE_3,
	HII2S_S2_SRC_MODE_12,
	HII2S_S2_SRC_MODE_6,
	HII2S_S2_SRC_MODE_2,
};
199
/* Encodings for the VOICE_DLINK_SRC_MODE field; value 0 is not used. */
enum hi6210_voice_dlink_src_mode {
	HII2S_VOICE_DL_SRC_MODE_12 = 1,
	HII2S_VOICE_DL_SRC_MODE_6,
	HII2S_VOICE_DL_SRC_MODE_2,
	HII2S_VOICE_DL_SRC_MODE_3,
};
206
207#define HII2S_ADC_PGA_CFG 0x1c
208#define HII2S_S1_INPUT_PGA_CFG 0x20
209#define HII2S_S2_INPUT_PGA_CFG 0x24
210#define HII2S_ST_DL_PGA_CFG 0x28
211#define HII2S_VOICE_SIDETONE_DLINK_PGA_CFG 0x2c
212#define HII2S_APB_AFIFO_CFG_1 0x30
213#define HII2S_APB_AFIFO_CFG_2 0x34
214#define HII2S_ST_DL_FIFO_TH_CFG 0x38
215
216#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_SHIFT 24
217#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AEMPTY_MASK 0x1f
218#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_SHIFT 16
219#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_R_AFULL_MASK 0x1f
220#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_SHIFT 8
221#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AEMPTY_MASK 0x1f
222#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_SHIFT 0
223#define HII2S_ST_DL_FIFO_TH_CFG__ST_DL_L_AFULL_MASK 0x1f
224
225#define HII2S_STEREO_UPLINK_FIFO_TH_CFG 0x3c
226#define HII2S_VOICE_UPLINK_FIFO_TH_CFG 0x40
227#define HII2S_CODEC_IRQ_MASK 0x44
228#define HII2S_CODEC_IRQ 0x48
229#define HII2S_DACL_AGC_CFG_1 0x4c
230#define HII2S_DACL_AGC_CFG_2 0x50
231#define HII2S_DACR_AGC_CFG_1 0x54
232#define HII2S_DACR_AGC_CFG_2 0x58
233#define HII2S_DMIC_SIF_CFG 0x5c
234#define HII2S_MISC_CFG 0x60
235
236#define HII2S_MISC_CFG__THIRDMD_DLINK_TEST_SEL BIT(17)
237#define HII2S_MISC_CFG__THIRDMD_DLINK_DIN_SEL BIT(16)
238#define HII2S_MISC_CFG__S3_DOUT_RIGHT_SEL BIT(14)
239#define HII2S_MISC_CFG__S3_DOUT_LEFT_SEL BIT(13)
240#define HII2S_MISC_CFG__S3_DIN_TEST_SEL BIT(12)
241#define HII2S_MISC_CFG__VOICE_DLINK_SRC_UP_DOUT_VLD_SEL BIT(8)
242#define HII2S_MISC_CFG__VOICE_DLINK_TEST_SEL BIT(7)
243#define HII2S_MISC_CFG__VOICE_DLINK_DIN_SEL BIT(6)
244#define HII2S_MISC_CFG__ST_DL_TEST_SEL BIT(4)
245#define HII2S_MISC_CFG__S2_DOUT_RIGHT_SEL BIT(3)
246#define HII2S_MISC_CFG__S2_DOUT_TEST_SEL BIT(2)
247#define HII2S_MISC_CFG__S1_DOUT_TEST_SEL BIT(1)
248#define HII2S_MISC_CFG__S2_DOUT_LEFT_SEL BIT(0)
249
250#define HII2S_S2_SRC_CFG 0x64
251#define HII2S_MEM_CFG 0x68
252#define HII2S_THIRDMD_PCM_PGA_CFG 0x6c
253#define HII2S_THIRD_MODEM_FIFO_TH 0x70
254#define HII2S_S3_ANTI_FREQ_JITTER_TX_INC_CNT 0x74
255#define HII2S_S3_ANTI_FREQ_JITTER_TX_DEC_CNT 0x78
256#define HII2S_S3_ANTI_FREQ_JITTER_RX_INC_CNT 0x7c
257#define HII2S_S3_ANTI_FREQ_JITTER_RX_DEC_CNT 0x80
258#define HII2S_ANTI_FREQ_JITTER_EN 0x84
259#define HII2S_CLK_SEL 0x88
260
261/* 0 = BT owns the i2s */
262#define HII2S_CLK_SEL__I2S_BT_FM_SEL BIT(0)
263/* 0 = internal source, 1 = ext */
264#define HII2S_CLK_SEL__EXT_12_288MHZ_SEL BIT(1)
265
266
267#define HII2S_THIRDMD_DLINK_CHANNEL 0xe8
268#define HII2S_THIRDMD_ULINK_CHANNEL 0xec
269#define HII2S_VOICE_DLINK_CHANNEL 0xf0
270
271/* shovel data in here for playback */
272#define HII2S_ST_DL_CHANNEL 0xf4
273#define HII2S_STEREO_UPLINK_CHANNEL 0xf8
274#define HII2S_VOICE_UPLINK_CHANNEL 0xfc
275
276#endif/* _HI6210_I2S_H */
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 526855ad479e..67968ef3bbda 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -202,6 +202,30 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
202 platforms with MAX98090 audio codec it also can support TI jack chip as aux device. 202 platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
203 If unsure select "N". 203 If unsure select "N".
204 204
205config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
206 tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
207 depends on X86_INTEL_LPSS && I2C && ACPI
208 select SND_SOC_DA7213
209 select SND_SST_ATOM_HIFI2_PLATFORM
210 select SND_SST_IPC_ACPI
211 select SND_SOC_INTEL_SST_MATCH if ACPI
212 help
213 This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
214 platforms with DA7212/7213 audio codec.
215 If unsure select "N".
216
217config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
218 tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
219 depends on X86_INTEL_LPSS && I2C && ACPI
220 select SND_SST_ATOM_HIFI2_PLATFORM
221 select SND_SST_IPC_ACPI
222 select SND_SOC_INTEL_SST_MATCH if ACPI
223 help
224 This adds support for ASoC machine driver for the MinnowBoard Max or
225 Up boards and provides access to I2S signals on the Low-Speed
226 connector
227 If unsure select "N".
228
205config SND_SOC_INTEL_SKYLAKE 229config SND_SOC_INTEL_SKYLAKE
206 tristate 230 tristate
207 select SND_HDA_EXT_CORE 231 select SND_HDA_EXT_CORE
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 747c0f393d2d..dd250b8b26f2 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -420,7 +420,21 @@ static const struct dmi_system_id byt_table[] = {
420 .callback = byt_thinkpad10_quirk_cb, 420 .callback = byt_thinkpad10_quirk_cb,
421 .matches = { 421 .matches = {
422 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 422 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
423 DMI_MATCH(DMI_PRODUCT_NAME, "20C3001VHH"), 423 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
424 },
425 },
426 {
427 .callback = byt_thinkpad10_quirk_cb,
428 .matches = {
429 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
430 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
431 },
432 },
433 {
434 .callback = byt_thinkpad10_quirk_cb,
435 .matches = {
436 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
437 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
424 }, 438 },
425 }, 439 },
426 { } 440 { }
@@ -480,12 +494,23 @@ static struct sst_acpi_mach sst_acpi_bytcr[] = {
480 &byt_rvp_platform_data }, 494 &byt_rvp_platform_data },
481 {"10EC5651", "bytcr_rt5651", "intel/fw_sst_0f28.bin", "bytcr_rt5651", NULL, 495 {"10EC5651", "bytcr_rt5651", "intel/fw_sst_0f28.bin", "bytcr_rt5651", NULL,
482 &byt_rvp_platform_data }, 496 &byt_rvp_platform_data },
497 {"DLGS7212", "bytcht_da7213", "intel/fw_sst_0f28.bin", "bytcht_da7213", NULL,
498 &byt_rvp_platform_data },
499 {"DLGS7213", "bytcht_da7213", "intel/fw_sst_0f28.bin", "bytcht_da7213", NULL,
500 &byt_rvp_platform_data },
483 /* some Baytrail platforms rely on RT5645, use CHT machine driver */ 501 /* some Baytrail platforms rely on RT5645, use CHT machine driver */
484 {"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL, 502 {"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
485 &byt_rvp_platform_data }, 503 &byt_rvp_platform_data },
486 {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL, 504 {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
487 &byt_rvp_platform_data }, 505 &byt_rvp_platform_data },
488 506#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
507 /*
508 * This is always last in the table so that it is selected only when
509 * enabled explicitly and there is no codec-related information in SSDT
510 */
511 {"80860F28", "bytcht_nocodec", "intel/fw_sst_0f28.bin", "bytcht_nocodec", NULL,
512 &byt_rvp_platform_data },
513#endif
489 {}, 514 {},
490}; 515};
491 516
@@ -504,6 +529,10 @@ static struct sst_acpi_mach sst_acpi_chv[] = {
504 529
505 {"193C9890", "cht-bsw-max98090", "intel/fw_sst_22a8.bin", "cht-bsw", NULL, 530 {"193C9890", "cht-bsw-max98090", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
506 &chv_platform_data }, 531 &chv_platform_data },
532 {"DLGS7212", "bytcht_da7213", "intel/fw_sst_22a8.bin", "bytcht_da7213", NULL,
533 &chv_platform_data },
534 {"DLGS7213", "bytcht_da7213", "intel/fw_sst_22a8.bin", "bytcht_da7213", NULL,
535 &chv_platform_data },
507 /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */ 536 /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
508 {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", cht_quirk, 537 {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", cht_quirk,
509 &chv_platform_data }, 538 &chv_platform_data },
@@ -512,6 +541,14 @@ static struct sst_acpi_mach sst_acpi_chv[] = {
512 /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */ 541 /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
513 {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL, 542 {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
514 &chv_platform_data }, 543 &chv_platform_data },
544#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
545 /*
546 * This is always last in the table so that it is selected only when
547 * enabled explicitly and there is no codec-related information in SSDT
548 */
549 {"808622A8", "bytcht_nocodec", "intel/fw_sst_22a8.bin", "bytcht_nocodec", NULL,
550 &chv_platform_data },
551#endif
515 {}, 552 {},
516}; 553};
517 554
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 14c2d9d18180..20b01e02ed8f 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -236,7 +236,9 @@ static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
236 retval = init->result; 236 retval = init->result;
237 goto ret; 237 goto ret;
238 } 238 }
239 dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n", 239 if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
240 sizeof(init->fw_version)))
241 dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
240 init->fw_version.type, init->fw_version.major, 242 init->fw_version.type, init->fw_version.major,
241 init->fw_version.minor, init->fw_version.build); 243 init->fw_version.minor, init->fw_version.build);
242 dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n", 244 dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 5639f10774e6..56896e09445d 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -10,6 +10,8 @@ snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
10snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o 10snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
11snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o 11snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
12snd-soc-sst-cht-bsw-max98090_ti-objs := cht_bsw_max98090_ti.o 12snd-soc-sst-cht-bsw-max98090_ti-objs := cht_bsw_max98090_ti.o
13snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
14snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
13snd-soc-skl_rt286-objs := skl_rt286.o 15snd-soc-skl_rt286-objs := skl_rt286.o
14snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o 16snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
15snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o 17snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -26,6 +28,8 @@ obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH) += snd-soc-sst-bytcr-rt5651.o
26obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o 28obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
27obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o 29obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
28obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o 30obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o
31obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
32obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
29obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o 33obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
30obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o 34obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o
31obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o 35obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 53c6b4cbb1e1..14d9693c1641 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -193,13 +193,12 @@ static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
193 RT5677_CLK_SEL_I2S1_ASRC); 193 RT5677_CLK_SEL_I2S1_ASRC);
194 194
195 /* Request rt5677 GPIO for headphone amp control */ 195 /* Request rt5677 GPIO for headphone amp control */
196 bdw_rt5677->gpio_hp_en = devm_gpiod_get_index(codec->dev, 196 bdw_rt5677->gpio_hp_en = devm_gpiod_get(codec->dev, "headphone-enable",
197 "headphone-enable", 0, 0); 197 GPIOD_OUT_LOW);
198 if (IS_ERR(bdw_rt5677->gpio_hp_en)) { 198 if (IS_ERR(bdw_rt5677->gpio_hp_en)) {
199 dev_err(codec->dev, "Can't find HP_AMP_SHDN_L gpio\n"); 199 dev_err(codec->dev, "Can't find HP_AMP_SHDN_L gpio\n");
200 return PTR_ERR(bdw_rt5677->gpio_hp_en); 200 return PTR_ERR(bdw_rt5677->gpio_hp_en);
201 } 201 }
202 gpiod_direction_output(bdw_rt5677->gpio_hp_en, 0);
203 202
204 /* Create and initialize headphone jack */ 203 /* Create and initialize headphone jack */
205 if (!snd_soc_card_jack_new(rtd->card, "Headphone Jack", 204 if (!snd_soc_card_jack_new(rtd->card, "Headphone Jack",
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index faf865bb1765..6dcbbcefc25b 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -269,9 +269,6 @@ static struct snd_soc_card broadwell_rt286 = {
269static int broadwell_audio_probe(struct platform_device *pdev) 269static int broadwell_audio_probe(struct platform_device *pdev)
270{ 270{
271 broadwell_rt286.dev = &pdev->dev; 271 broadwell_rt286.dev = &pdev->dev;
272
273 snd_soc_set_dmi_name(&broadwell_rt286, NULL);
274
275 return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286); 272 return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286);
276} 273}
277 274
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 2cda06cde4d1..3a8c4d954a91 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -55,6 +55,54 @@ enum {
55 BXT_DPCM_AUDIO_HDMI3_PB, 55 BXT_DPCM_AUDIO_HDMI3_PB,
56}; 56};
57 57
58static inline struct snd_soc_dai *bxt_get_codec_dai(struct snd_soc_card *card)
59{
60 struct snd_soc_pcm_runtime *rtd;
61
62 list_for_each_entry(rtd, &card->rtd_list, list) {
63
64 if (!strncmp(rtd->codec_dai->name, BXT_DIALOG_CODEC_DAI,
65 strlen(BXT_DIALOG_CODEC_DAI)))
66 return rtd->codec_dai;
67 }
68
69 return NULL;
70}
71
72static int platform_clock_control(struct snd_soc_dapm_widget *w,
73 struct snd_kcontrol *k, int event)
74{
75 int ret = 0;
76 struct snd_soc_dapm_context *dapm = w->dapm;
77 struct snd_soc_card *card = dapm->card;
78 struct snd_soc_dai *codec_dai;
79
80 codec_dai = bxt_get_codec_dai(card);
81 if (!codec_dai) {
82 dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
83 return -EIO;
84 }
85
86 if (SND_SOC_DAPM_EVENT_OFF(event)) {
87 ret = snd_soc_dai_set_pll(codec_dai, 0,
88 DA7219_SYSCLK_MCLK, 0, 0);
89 if (ret)
90 dev_err(card->dev, "failed to stop PLL: %d\n", ret);
91 } else if(SND_SOC_DAPM_EVENT_ON(event)) {
92 ret = snd_soc_dai_set_sysclk(codec_dai,
93 DA7219_CLKSRC_MCLK, 19200000, SND_SOC_CLOCK_IN);
94 if (ret)
95 dev_err(card->dev, "can't set codec sysclk configuration\n");
96
97 ret = snd_soc_dai_set_pll(codec_dai, 0,
98 DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304);
99 if (ret)
100 dev_err(card->dev, "failed to start PLL: %d\n", ret);
101 }
102
103 return ret;
104}
105
58static const struct snd_kcontrol_new broxton_controls[] = { 106static const struct snd_kcontrol_new broxton_controls[] = {
59 SOC_DAPM_PIN_SWITCH("Headphone Jack"), 107 SOC_DAPM_PIN_SWITCH("Headphone Jack"),
60 SOC_DAPM_PIN_SWITCH("Headset Mic"), 108 SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -69,6 +117,8 @@ static const struct snd_soc_dapm_widget broxton_widgets[] = {
69 SND_SOC_DAPM_SPK("HDMI1", NULL), 117 SND_SOC_DAPM_SPK("HDMI1", NULL),
70 SND_SOC_DAPM_SPK("HDMI2", NULL), 118 SND_SOC_DAPM_SPK("HDMI2", NULL),
71 SND_SOC_DAPM_SPK("HDMI3", NULL), 119 SND_SOC_DAPM_SPK("HDMI3", NULL),
120 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
121 platform_clock_control, SND_SOC_DAPM_POST_PMD|SND_SOC_DAPM_PRE_PMU),
72}; 122};
73 123
74static const struct snd_soc_dapm_route broxton_map[] = { 124static const struct snd_soc_dapm_route broxton_map[] = {
@@ -109,6 +159,9 @@ static const struct snd_soc_dapm_route broxton_map[] = {
109 /* DMIC */ 159 /* DMIC */
110 {"dmic01_hifi", NULL, "DMIC01 Rx"}, 160 {"dmic01_hifi", NULL, "DMIC01 Rx"},
111 {"DMIC01 Rx", NULL, "DMIC AIF"}, 161 {"DMIC01 Rx", NULL, "DMIC AIF"},
162
163 { "Headphone Jack", NULL, "Platform Clock" },
164 { "Headset Mic", NULL, "Platform Clock" },
112}; 165};
113 166
114static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd, 167static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -243,49 +296,6 @@ static const struct snd_soc_ops broxton_da7219_fe_ops = {
243 .startup = bxt_fe_startup, 296 .startup = bxt_fe_startup,
244}; 297};
245 298
246static int broxton_da7219_hw_params(struct snd_pcm_substream *substream,
247 struct snd_pcm_hw_params *params)
248{
249 struct snd_soc_pcm_runtime *rtd = substream->private_data;
250 struct snd_soc_dai *codec_dai = rtd->codec_dai;
251 int ret;
252
253 ret = snd_soc_dai_set_sysclk(codec_dai,
254 DA7219_CLKSRC_MCLK, 19200000, SND_SOC_CLOCK_IN);
255 if (ret < 0)
256 dev_err(codec_dai->dev, "can't set codec sysclk configuration\n");
257
258 ret = snd_soc_dai_set_pll(codec_dai, 0,
259 DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304);
260 if (ret < 0) {
261 dev_err(codec_dai->dev, "failed to start PLL: %d\n", ret);
262 return -EIO;
263 }
264
265 return ret;
266}
267
268static int broxton_da7219_hw_free(struct snd_pcm_substream *substream)
269{
270 struct snd_soc_pcm_runtime *rtd = substream->private_data;
271 struct snd_soc_dai *codec_dai = rtd->codec_dai;
272 int ret;
273
274 ret = snd_soc_dai_set_pll(codec_dai, 0,
275 DA7219_SYSCLK_MCLK, 0, 0);
276 if (ret < 0) {
277 dev_err(codec_dai->dev, "failed to stop PLL: %d\n", ret);
278 return -EIO;
279 }
280
281 return ret;
282}
283
284static const struct snd_soc_ops broxton_da7219_ops = {
285 .hw_params = broxton_da7219_hw_params,
286 .hw_free = broxton_da7219_hw_free,
287};
288
289static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd, 299static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
290 struct snd_pcm_hw_params *params) 300 struct snd_pcm_hw_params *params)
291{ 301{
@@ -467,7 +477,6 @@ static struct snd_soc_dai_link broxton_dais[] = {
467 SND_SOC_DAIFMT_CBS_CFS, 477 SND_SOC_DAIFMT_CBS_CFS,
468 .ignore_pmdown_time = 1, 478 .ignore_pmdown_time = 1,
469 .be_hw_params_fixup = broxton_ssp_fixup, 479 .be_hw_params_fixup = broxton_ssp_fixup,
470 .ops = &broxton_da7219_ops,
471 .dpcm_playback = 1, 480 .dpcm_playback = 1,
472 .dpcm_capture = 1, 481 .dpcm_capture = 1,
473 }, 482 },
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 176c080a9818..1a68d043c803 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -274,12 +274,15 @@ static int bxt_fe_startup(struct snd_pcm_substream *substream)
274 * on this platform for PCM device we support: 274 * on this platform for PCM device we support:
275 * 48Khz 275 * 48Khz
276 * stereo 276 * stereo
277 * 16-bit audio
277 */ 278 */
278 279
279 runtime->hw.channels_max = 2; 280 runtime->hw.channels_max = 2;
280 snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 281 snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
281 &constraints_channels); 282 &constraints_channels);
282 283
284 runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
285 snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
283 snd_pcm_hw_constraint_list(runtime, 0, 286 snd_pcm_hw_constraint_list(runtime, 0,
284 SNDRV_PCM_HW_PARAM_RATE, &constraints_rates); 287 SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
285 288
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
new file mode 100644
index 000000000000..18873e23f404
--- /dev/null
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -0,0 +1,283 @@
1/*
2 * bytcht-da7213.c - ASoc Machine driver for Intel Baytrail and
3 * Cherrytrail-based platforms, with Dialog DA7213 codec
4 *
5 * Copyright (C) 2017 Intel Corporation
6 * Author: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21
22#include <linux/module.h>
23#include <linux/acpi.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26#include <asm/platform_sst_audio.h>
27#include <sound/pcm.h>
28#include <sound/pcm_params.h>
29#include <sound/soc.h>
30#include "../../codecs/da7213.h"
31#include "../atom/sst-atom-controls.h"
32#include "../common/sst-acpi.h"
33
34static const struct snd_kcontrol_new controls[] = {
35 SOC_DAPM_PIN_SWITCH("Headphone Jack"),
36 SOC_DAPM_PIN_SWITCH("Headset Mic"),
37 SOC_DAPM_PIN_SWITCH("Mic"),
38 SOC_DAPM_PIN_SWITCH("Aux In"),
39};
40
41static const struct snd_soc_dapm_widget dapm_widgets[] = {
42 SND_SOC_DAPM_HP("Headphone Jack", NULL),
43 SND_SOC_DAPM_MIC("Headset Mic", NULL),
44 SND_SOC_DAPM_MIC("Mic", NULL),
45 SND_SOC_DAPM_LINE("Aux In", NULL),
46};
47
48static const struct snd_soc_dapm_route audio_map[] = {
49 {"Headphone Jack", NULL, "HPL"},
50 {"Headphone Jack", NULL, "HPR"},
51
52 {"AUXL", NULL, "Aux In"},
53 {"AUXR", NULL, "Aux In"},
54
55 /* Assume Mic1 is linked to Headset and Mic2 to on-board mic */
56 {"MIC1", NULL, "Headset Mic"},
57 {"MIC2", NULL, "Mic"},
58
59 /* SOC-codec link */
60 {"ssp2 Tx", NULL, "codec_out0"},
61 {"ssp2 Tx", NULL, "codec_out1"},
62 {"codec_in0", NULL, "ssp2 Rx"},
63 {"codec_in1", NULL, "ssp2 Rx"},
64
65 {"Playback", NULL, "ssp2 Tx"},
66 {"ssp2 Rx", NULL, "Capture"},
67};
68
69static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
70 struct snd_pcm_hw_params *params)
71{
72 int ret;
73 struct snd_interval *rate = hw_param_interval(params,
74 SNDRV_PCM_HW_PARAM_RATE);
75 struct snd_interval *channels = hw_param_interval(params,
76 SNDRV_PCM_HW_PARAM_CHANNELS);
77
78 /* The DSP will convert the FE rate to 48k, stereo, 24bits */
79 rate->min = rate->max = 48000;
80 channels->min = channels->max = 2;
81
82 /* set SSP2 to 24-bit */
83 params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
84
85 /*
86 * Default mode for SSP configuration is TDM 4 slot, override config
87 * with explicit setting to I2S 2ch 24-bit. The word length is set with
88 * dai_set_tdm_slot() since there is no other API exposed
89 */
90 ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
91 SND_SOC_DAIFMT_I2S |
92 SND_SOC_DAIFMT_NB_NF |
93 SND_SOC_DAIFMT_CBS_CFS);
94 if (ret < 0) {
95 dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
96 return ret;
97 }
98
99 ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
100 if (ret < 0) {
101 dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
102 return ret;
103 }
104
105 return 0;
106}
107
108static int aif1_startup(struct snd_pcm_substream *substream)
109{
110 return snd_pcm_hw_constraint_single(substream->runtime,
111 SNDRV_PCM_HW_PARAM_RATE, 48000);
112}
113
114static int aif1_hw_params(struct snd_pcm_substream *substream,
115 struct snd_pcm_hw_params *params)
116{
117 struct snd_soc_pcm_runtime *rtd = substream->private_data;
118 struct snd_soc_dai *codec_dai = rtd->codec_dai;
119 int ret;
120
121 ret = snd_soc_dai_set_sysclk(codec_dai, DA7213_CLKSRC_MCLK,
122 19200000, SND_SOC_CLOCK_IN);
123 if (ret < 0)
124 dev_err(codec_dai->dev, "can't set codec sysclk configuration\n");
125
126 ret = snd_soc_dai_set_pll(codec_dai, 0,
127 DA7213_SYSCLK_PLL_SRM, 0, DA7213_PLL_FREQ_OUT_98304000);
128 if (ret < 0) {
129 dev_err(codec_dai->dev, "failed to start PLL: %d\n", ret);
130 return -EIO;
131 }
132
133 return ret;
134}
135
136static int aif1_hw_free(struct snd_pcm_substream *substream)
137{
138 struct snd_soc_pcm_runtime *rtd = substream->private_data;
139 struct snd_soc_dai *codec_dai = rtd->codec_dai;
140 int ret;
141
142 ret = snd_soc_dai_set_pll(codec_dai, 0,
143 DA7213_SYSCLK_MCLK, 0, 0);
144 if (ret < 0) {
145 dev_err(codec_dai->dev, "failed to stop PLL: %d\n", ret);
146 return -EIO;
147 }
148
149 return ret;
150}
151
152static const struct snd_soc_ops aif1_ops = {
153 .startup = aif1_startup,
154};
155
156static const struct snd_soc_ops ssp2_ops = {
157 .hw_params = aif1_hw_params,
158 .hw_free = aif1_hw_free,
159
160};
161
162static struct snd_soc_dai_link dailink[] = {
163 [MERR_DPCM_AUDIO] = {
164 .name = "Audio Port",
165 .stream_name = "Audio",
166 .cpu_dai_name = "media-cpu-dai",
167 .codec_dai_name = "snd-soc-dummy-dai",
168 .codec_name = "snd-soc-dummy",
169 .platform_name = "sst-mfld-platform",
170 .nonatomic = true,
171 .dynamic = 1,
172 .dpcm_playback = 1,
173 .dpcm_capture = 1,
174 .ops = &aif1_ops,
175 },
176 [MERR_DPCM_DEEP_BUFFER] = {
177 .name = "Deep-Buffer Audio Port",
178 .stream_name = "Deep-Buffer Audio",
179 .cpu_dai_name = "deepbuffer-cpu-dai",
180 .codec_dai_name = "snd-soc-dummy-dai",
181 .codec_name = "snd-soc-dummy",
182 .platform_name = "sst-mfld-platform",
183 .nonatomic = true,
184 .dynamic = 1,
185 .dpcm_playback = 1,
186 .ops = &aif1_ops,
187 },
188 [MERR_DPCM_COMPR] = {
189 .name = "Compressed Port",
190 .stream_name = "Compress",
191 .cpu_dai_name = "compress-cpu-dai",
192 .codec_dai_name = "snd-soc-dummy-dai",
193 .codec_name = "snd-soc-dummy",
194 .platform_name = "sst-mfld-platform",
195 },
196 /* CODEC<->CODEC link */
197 /* back ends */
198 {
199 .name = "SSP2-Codec",
200 .id = 1,
201 .cpu_dai_name = "ssp2-port",
202 .platform_name = "sst-mfld-platform",
203 .no_pcm = 1,
204 .codec_dai_name = "da7213-hifi",
205 .codec_name = "i2c-DLGS7213:00",
206 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
207 | SND_SOC_DAIFMT_CBS_CFS,
208 .be_hw_params_fixup = codec_fixup,
209 .nonatomic = true,
210 .dpcm_playback = 1,
211 .dpcm_capture = 1,
212 .ops = &ssp2_ops,
213 },
214};
215
216/* SoC card */
217static struct snd_soc_card bytcht_da7213_card = {
218 .name = "bytcht-da7213",
219 .owner = THIS_MODULE,
220 .dai_link = dailink,
221 .num_links = ARRAY_SIZE(dailink),
222 .controls = controls,
223 .num_controls = ARRAY_SIZE(controls),
224 .dapm_widgets = dapm_widgets,
225 .num_dapm_widgets = ARRAY_SIZE(dapm_widgets),
226 .dapm_routes = audio_map,
227 .num_dapm_routes = ARRAY_SIZE(audio_map),
228};
229
230static char codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
231
232static int bytcht_da7213_probe(struct platform_device *pdev)
233{
234 int ret_val = 0;
235 int i;
236 struct snd_soc_card *card;
237 struct sst_acpi_mach *mach;
238 const char *i2c_name = NULL;
239 int dai_index = 0;
240
241 mach = (&pdev->dev)->platform_data;
242 card = &bytcht_da7213_card;
243 card->dev = &pdev->dev;
244
245 /* fix index of codec dai */
246 dai_index = MERR_DPCM_COMPR + 1;
247 for (i = 0; i < ARRAY_SIZE(dailink); i++) {
248 if (!strcmp(dailink[i].codec_name, "i2c-DLGS7213:00")) {
249 dai_index = i;
250 break;
251 }
252 }
253
254 /* fixup codec name based on HID */
255 i2c_name = sst_acpi_find_name_from_hid(mach->id);
256 if (i2c_name != NULL) {
257 snprintf(codec_name, sizeof(codec_name),
258 "%s%s", "i2c-", i2c_name);
259 dailink[dai_index].codec_name = codec_name;
260 }
261
262 ret_val = devm_snd_soc_register_card(&pdev->dev, card);
263 if (ret_val) {
264 dev_err(&pdev->dev,
265 "snd_soc_register_card failed %d\n", ret_val);
266 return ret_val;
267 }
268 platform_set_drvdata(pdev, card);
269 return ret_val;
270}
271
272static struct platform_driver bytcht_da7213_driver = {
273 .driver = {
274 .name = "bytcht_da7213",
275 },
276 .probe = bytcht_da7213_probe,
277};
278module_platform_driver(bytcht_da7213_driver);
279
280MODULE_DESCRIPTION("ASoC Intel(R) Baytrail/Cherrytrail+DA7213 Machine driver");
281MODULE_AUTHOR("Pierre-Louis Bossart");
282MODULE_LICENSE("GPL v2");
283MODULE_ALIAS("platform:bytcht_da7213");
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
new file mode 100644
index 000000000000..89853eeaaf9d
--- /dev/null
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -0,0 +1,208 @@
1/*
2 * bytcht_nocodec.c - ASoc Machine driver for MinnowBoard Max and Up
3 * to make I2S signals observable on the Low-Speed connector. Audio codec
4 * is not managed by ASoC/DAPM
5 *
6 * Copyright (C) 2015-2017 Intel Corp
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21
22#include <linux/module.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include "../atom/sst-atom-controls.h"
27
28static const struct snd_soc_dapm_widget widgets[] = {
29 SND_SOC_DAPM_MIC("Mic", NULL),
30 SND_SOC_DAPM_SPK("Speaker", NULL),
31};
32
33static const struct snd_kcontrol_new controls[] = {
34 SOC_DAPM_PIN_SWITCH("Mic"),
35 SOC_DAPM_PIN_SWITCH("Speaker"),
36};
37
38static const struct snd_soc_dapm_route audio_map[] = {
39 {"ssp2 Tx", NULL, "codec_out0"},
40 {"ssp2 Tx", NULL, "codec_out1"},
41 {"codec_in0", NULL, "ssp2 Rx"},
42 {"codec_in1", NULL, "ssp2 Rx"},
43
44 {"ssp2 Rx", NULL, "Mic"},
45 {"Speaker", NULL, "ssp2 Tx"},
46};
47
48static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
49 struct snd_pcm_hw_params *params)
50{
51 struct snd_interval *rate = hw_param_interval(params,
52 SNDRV_PCM_HW_PARAM_RATE);
53 struct snd_interval *channels = hw_param_interval(params,
54 SNDRV_PCM_HW_PARAM_CHANNELS);
55 int ret;
56
57 /* The DSP will convert the FE rate to 48k, stereo, 24bits */
58 rate->min = rate->max = 48000;
59 channels->min = channels->max = 2;
60
61 /* set SSP2 to 24-bit */
62 params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
63
64 /*
65 * Default mode for SSP configuration is TDM 4 slot, override config
66 * with explicit setting to I2S 2ch 24-bit. The word length is set with
67 * dai_set_tdm_slot() since there is no other API exposed
68 */
69 ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
70 SND_SOC_DAIFMT_I2S |
71 SND_SOC_DAIFMT_NB_NF |
72 SND_SOC_DAIFMT_CBS_CFS);
73
74 if (ret < 0) {
75 dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
76 return ret;
77 }
78
79 ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
80 if (ret < 0) {
81 dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
82 return ret;
83 }
84
85 return 0;
86}
87
88static unsigned int rates_48000[] = {
89 48000,
90};
91
92static struct snd_pcm_hw_constraint_list constraints_48000 = {
93 .count = ARRAY_SIZE(rates_48000),
94 .list = rates_48000,
95};
96
97static int aif1_startup(struct snd_pcm_substream *substream)
98{
99 return snd_pcm_hw_constraint_list(substream->runtime, 0,
100 SNDRV_PCM_HW_PARAM_RATE,
101 &constraints_48000);
102}
103
104static struct snd_soc_ops aif1_ops = {
105 .startup = aif1_startup,
106};
107
108static struct snd_soc_dai_link dais[] = {
109 [MERR_DPCM_AUDIO] = {
110 .name = "Audio Port",
111 .stream_name = "Audio",
112 .cpu_dai_name = "media-cpu-dai",
113 .codec_dai_name = "snd-soc-dummy-dai",
114 .codec_name = "snd-soc-dummy",
115 .platform_name = "sst-mfld-platform",
116 .ignore_suspend = 1,
117 .nonatomic = true,
118 .dynamic = 1,
119 .dpcm_playback = 1,
120 .dpcm_capture = 1,
121 .ops = &aif1_ops,
122 },
123 [MERR_DPCM_DEEP_BUFFER] = {
124 .name = "Deep-Buffer Audio Port",
125 .stream_name = "Deep-Buffer Audio",
126 .cpu_dai_name = "deepbuffer-cpu-dai",
127 .codec_dai_name = "snd-soc-dummy-dai",
128 .codec_name = "snd-soc-dummy",
129 .platform_name = "sst-mfld-platform",
130 .ignore_suspend = 1,
131 .nonatomic = true,
132 .dynamic = 1,
133 .dpcm_playback = 1,
134 .ops = &aif1_ops,
135 },
136 [MERR_DPCM_COMPR] = {
137 .name = "Compressed Port",
138 .stream_name = "Compress",
139 .cpu_dai_name = "compress-cpu-dai",
140 .codec_dai_name = "snd-soc-dummy-dai",
141 .codec_name = "snd-soc-dummy",
142 .platform_name = "sst-mfld-platform",
143 },
144 /* CODEC<->CODEC link */
145 /* back ends */
146 {
147 .name = "SSP2-LowSpeed Connector",
148 .id = 1,
149 .cpu_dai_name = "ssp2-port",
150 .platform_name = "sst-mfld-platform",
151 .no_pcm = 1,
152 .codec_dai_name = "snd-soc-dummy-dai",
153 .codec_name = "snd-soc-dummy",
154 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
155 | SND_SOC_DAIFMT_CBS_CFS,
156 .be_hw_params_fixup = codec_fixup,
157 .ignore_suspend = 1,
158 .nonatomic = true,
159 .dpcm_playback = 1,
160 .dpcm_capture = 1,
161 },
162};
163
164/* SoC card */
165static struct snd_soc_card bytcht_nocodec_card = {
166 .name = "bytcht-nocodec",
167 .owner = THIS_MODULE,
168 .dai_link = dais,
169 .num_links = ARRAY_SIZE(dais),
170 .dapm_widgets = widgets,
171 .num_dapm_widgets = ARRAY_SIZE(widgets),
172 .dapm_routes = audio_map,
173 .num_dapm_routes = ARRAY_SIZE(audio_map),
174 .controls = controls,
175 .num_controls = ARRAY_SIZE(controls),
176 .fully_routed = true,
177};
178
179static int snd_bytcht_nocodec_mc_probe(struct platform_device *pdev)
180{
181 int ret_val = 0;
182
183 /* register the soc card */
184 bytcht_nocodec_card.dev = &pdev->dev;
185
186 ret_val = devm_snd_soc_register_card(&pdev->dev, &bytcht_nocodec_card);
187
188 if (ret_val) {
189 dev_err(&pdev->dev, "devm_snd_soc_register_card failed %d\n",
190 ret_val);
191 return ret_val;
192 }
193 platform_set_drvdata(pdev, &bytcht_nocodec_card);
194 return ret_val;
195}
196
197static struct platform_driver snd_bytcht_nocodec_mc_driver = {
198 .driver = {
199 .name = "bytcht_nocodec",
200 },
201 .probe = snd_bytcht_nocodec_mc_probe,
202};
203module_platform_driver(snd_bytcht_nocodec_mc_driver);
204
205MODULE_DESCRIPTION("ASoC Intel(R) Baytrail/Cherrytrail Nocodec Machine driver");
206MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart at linux.intel.com>");
207MODULE_LICENSE("GPL v2");
208MODULE_ALIAS("platform:bytcht_nocodec");
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 9e2a3404a836..4a76b099a508 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/moduleparam.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/acpi.h> 24#include <linux/acpi.h>
24#include <linux/device.h> 25#include <linux/device.h>
@@ -56,35 +57,88 @@ enum {
56struct byt_rt5640_private { 57struct byt_rt5640_private {
57 struct clk *mclk; 58 struct clk *mclk;
58}; 59};
60static bool is_bytcr;
59 61
60static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN; 62static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN;
63static unsigned int quirk_override;
64module_param_named(quirk, quirk_override, uint, 0444);
65MODULE_PARM_DESC(quirk, "Board-specific quirk override");
61 66
62static void log_quirks(struct device *dev) 67static void log_quirks(struct device *dev)
63{ 68{
64 if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_DMIC1_MAP) 69 int map;
65 dev_info(dev, "quirk DMIC1_MAP enabled"); 70 bool has_dmic = false;
66 if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_DMIC2_MAP) 71 bool has_mclk = false;
67 dev_info(dev, "quirk DMIC2_MAP enabled"); 72 bool has_ssp0 = false;
68 if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_IN1_MAP) 73 bool has_ssp0_aif1 = false;
69 dev_info(dev, "quirk IN1_MAP enabled"); 74 bool has_ssp0_aif2 = false;
70 if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_IN3_MAP) 75 bool has_ssp2_aif2 = false;
71 dev_info(dev, "quirk IN3_MAP enabled"); 76
72 if (byt_rt5640_quirk & BYT_RT5640_DMIC_EN) 77 map = BYT_RT5640_MAP(byt_rt5640_quirk);
73 dev_info(dev, "quirk DMIC enabled"); 78 switch (map) {
79 case BYT_RT5640_DMIC1_MAP:
80 dev_info(dev, "quirk DMIC1_MAP enabled\n");
81 has_dmic = true;
82 break;
83 case BYT_RT5640_DMIC2_MAP:
84 dev_info(dev, "quirk DMIC2_MAP enabled\n");
85 has_dmic = true;
86 break;
87 case BYT_RT5640_IN1_MAP:
88 dev_info(dev, "quirk IN1_MAP enabled\n");
89 break;
90 case BYT_RT5640_IN3_MAP:
91 dev_info(dev, "quirk IN3_MAP enabled\n");
92 break;
93 default:
94 dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map);
95 break;
96 }
97 if (byt_rt5640_quirk & BYT_RT5640_DMIC_EN) {
98 if (has_dmic)
99 dev_info(dev, "quirk DMIC enabled\n");
100 else
101 dev_err(dev, "quirk DMIC enabled but no DMIC input set, will be ignored\n");
102 }
74 if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) 103 if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER)
75 dev_info(dev, "quirk MONO_SPEAKER enabled"); 104 dev_info(dev, "quirk MONO_SPEAKER enabled\n");
76 if (byt_rt5640_quirk & BYT_RT5640_DIFF_MIC) 105 if (byt_rt5640_quirk & BYT_RT5640_DIFF_MIC) {
77 dev_info(dev, "quirk DIFF_MIC enabled"); 106 if (!has_dmic)
78 if (byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) 107 dev_info(dev, "quirk DIFF_MIC enabled\n");
79 dev_info(dev, "quirk SSP2_AIF2 enabled"); 108 else
80 if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) 109 dev_info(dev, "quirk DIFF_MIC enabled but DMIC input selected, will be ignored\n");
81 dev_info(dev, "quirk SSP0_AIF1 enabled"); 110 }
82 if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2) 111 if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) {
83 dev_info(dev, "quirk SSP0_AIF2 enabled"); 112 dev_info(dev, "quirk SSP0_AIF1 enabled\n");
84 if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) 113 has_ssp0 = true;
85 dev_info(dev, "quirk MCLK_EN enabled"); 114 has_ssp0_aif1 = true;
86 if (byt_rt5640_quirk & BYT_RT5640_MCLK_25MHZ) 115 }
87 dev_info(dev, "quirk MCLK_25MHZ enabled"); 116 if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2) {
117 dev_info(dev, "quirk SSP0_AIF2 enabled\n");
118 has_ssp0 = true;
119 has_ssp0_aif2 = true;
120 }
121 if (byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) {
122 dev_info(dev, "quirk SSP2_AIF2 enabled\n");
123 has_ssp2_aif2 = true;
124 }
125 if (is_bytcr && !has_ssp0)
126 dev_err(dev, "Invalid routing, bytcr detected but no SSP0-based quirk, audio cannot work with SSP2 on bytcr\n");
127 if (has_ssp0_aif1 && has_ssp0_aif2)
128 dev_err(dev, "Invalid routing, SSP0 cannot be connected to both AIF1 and AIF2\n");
129 if (has_ssp0 && has_ssp2_aif2)
130 dev_err(dev, "Invalid routing, cannot have both SSP0 and SSP2 connected to codec\n");
131
132 if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) {
133 dev_info(dev, "quirk MCLK_EN enabled\n");
134 has_mclk = true;
135 }
136 if (byt_rt5640_quirk & BYT_RT5640_MCLK_25MHZ) {
137 if (has_mclk)
138 dev_info(dev, "quirk MCLK_25MHZ enabled\n");
139 else
140 dev_err(dev, "quirk MCLK_25MHZ enabled but quirk MCLK not selected, will be ignored\n");
141 }
88} 142}
89 143
90 144
@@ -128,7 +182,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
128 ret = clk_prepare_enable(priv->mclk); 182 ret = clk_prepare_enable(priv->mclk);
129 if (ret < 0) { 183 if (ret < 0) {
130 dev_err(card->dev, 184 dev_err(card->dev,
131 "could not configure MCLK state"); 185 "could not configure MCLK state\n");
132 return ret; 186 return ret;
133 } 187 }
134 } 188 }
@@ -710,8 +764,8 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
710 int i; 764 int i;
711 int dai_index; 765 int dai_index;
712 struct byt_rt5640_private *priv; 766 struct byt_rt5640_private *priv;
713 bool is_bytcr = false;
714 767
768 is_bytcr = false;
715 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC); 769 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
716 if (!priv) 770 if (!priv)
717 return -ENOMEM; 771 return -ENOMEM;
@@ -806,6 +860,11 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
806 860
807 /* check quirks before creating card */ 861 /* check quirks before creating card */
808 dmi_check_system(byt_rt5640_quirk_table); 862 dmi_check_system(byt_rt5640_quirk_table);
863 if (quirk_override) {
864 dev_info(&pdev->dev, "Overriding quirk 0x%x => 0x%x\n",
865 (unsigned int)byt_rt5640_quirk, quirk_override);
866 byt_rt5640_quirk = quirk_override;
867 }
809 log_quirks(&pdev->dev); 868 log_quirks(&pdev->dev);
810 869
811 if ((byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) || 870 if ((byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) ||
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index a3459d1682a6..d33bdaf92c57 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2000,10 +2000,8 @@ int sst_hsw_module_set_param(struct sst_hsw *hsw,
2000 u32 param_size, char *param) 2000 u32 param_size, char *param)
2001{ 2001{
2002 int ret; 2002 int ret;
2003 unsigned char *data = NULL;
2004 u32 header = 0; 2003 u32 header = 0;
2005 u32 payload_size = 0, transfer_parameter_size = 0; 2004 u32 payload_size = 0, transfer_parameter_size = 0;
2006 dma_addr_t dma_addr = 0;
2007 struct sst_hsw_transfer_parameter *parameter; 2005 struct sst_hsw_transfer_parameter *parameter;
2008 struct device *dev = hsw->dev; 2006 struct device *dev = hsw->dev;
2009 2007
@@ -2047,10 +2045,6 @@ int sst_hsw_module_set_param(struct sst_hsw *hsw,
2047 2045
2048 kfree(parameter); 2046 kfree(parameter);
2049 2047
2050 if (data)
2051 dma_free_coherent(hsw->dsp->dma_dev,
2052 param_size, (void *)data, dma_addr);
2053
2054 return ret; 2048 return ret;
2055} 2049}
2056 2050
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 15a063a403cc..f5e7dbb1ba39 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -25,7 +25,8 @@
25#include "skl-sst-ipc.h" 25#include "skl-sst-ipc.h"
26 26
27#define BXT_BASEFW_TIMEOUT 3000 27#define BXT_BASEFW_TIMEOUT 3000
28#define BXT_INIT_TIMEOUT 500 28#define BXT_INIT_TIMEOUT 300
29#define BXT_ROM_INIT_TIMEOUT 70
29#define BXT_IPC_PURGE_FW 0x01004000 30#define BXT_IPC_PURGE_FW 0x01004000
30 31
31#define BXT_ROM_INIT 0x5 32#define BXT_ROM_INIT 0x5
@@ -45,6 +46,8 @@
45/* Delay before scheduling D0i3 entry */ 46/* Delay before scheduling D0i3 entry */
46#define BXT_D0I3_DELAY 5000 47#define BXT_D0I3_DELAY 5000
47 48
49#define BXT_FW_ROM_INIT_RETRY 3
50
48static unsigned int bxt_get_errorcode(struct sst_dsp *ctx) 51static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
49{ 52{
50 return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE); 53 return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
@@ -55,29 +58,15 @@ bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
55{ 58{
56 struct snd_dma_buffer dmab; 59 struct snd_dma_buffer dmab;
57 struct skl_sst *skl = ctx->thread_context; 60 struct skl_sst *skl = ctx->thread_context;
58 const struct firmware *fw = NULL;
59 struct firmware stripped_fw; 61 struct firmware stripped_fw;
60 int ret = 0, i, dma_id, stream_tag; 62 int ret = 0, i, dma_id, stream_tag;
61 63
62 /* library indices start from 1 to N. 0 represents base FW */ 64 /* library indices start from 1 to N. 0 represents base FW */
63 for (i = 1; i < lib_count; i++) { 65 for (i = 1; i < lib_count; i++) {
64 ret = request_firmware(&fw, linfo[i].name, ctx->dev); 66 ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
65 if (ret < 0) {
66 dev_err(ctx->dev, "Request lib %s failed:%d\n",
67 linfo[i].name, ret);
68 return ret;
69 }
70
71 if (skl->is_first_boot) {
72 ret = snd_skl_parse_uuids(ctx, fw,
73 BXT_ADSP_FW_BIN_HDR_OFFSET, i); 67 BXT_ADSP_FW_BIN_HDR_OFFSET, i);
74 if (ret < 0) 68 if (ret < 0)
75 goto load_library_failed; 69 goto load_library_failed;
76 }
77
78 stripped_fw.data = fw->data;
79 stripped_fw.size = fw->size;
80 skl_dsp_strip_extended_manifest(&stripped_fw);
81 70
82 stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, 71 stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40,
83 stripped_fw.size, &dmab); 72 stripped_fw.size, &dmab);
@@ -92,21 +81,19 @@ bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
92 memcpy(dmab.area, stripped_fw.data, stripped_fw.size); 81 memcpy(dmab.area, stripped_fw.data, stripped_fw.size);
93 82
94 ctx->dsp_ops.trigger(ctx->dev, true, stream_tag); 83 ctx->dsp_ops.trigger(ctx->dev, true, stream_tag);
95 ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i); 84 ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i, true);
96 if (ret < 0) 85 if (ret < 0)
97 dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n", 86 dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n",
98 linfo[i].name, ret); 87 linfo[i].name, ret);
99 88
100 ctx->dsp_ops.trigger(ctx->dev, false, stream_tag); 89 ctx->dsp_ops.trigger(ctx->dev, false, stream_tag);
101 ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag); 90 ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag);
102 release_firmware(fw);
103 fw = NULL;
104 } 91 }
105 92
106 return ret; 93 return ret;
107 94
108load_library_failed: 95load_library_failed:
109 release_firmware(fw); 96 skl_release_library(linfo, lib_count);
110 return ret; 97 return ret;
111} 98}
112 99
@@ -156,7 +143,7 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
156 SKL_ADSP_REG_HIPCIE_DONE, 143 SKL_ADSP_REG_HIPCIE_DONE,
157 BXT_INIT_TIMEOUT, "HIPCIE Done"); 144 BXT_INIT_TIMEOUT, "HIPCIE Done");
158 if (ret < 0) { 145 if (ret < 0) {
159 dev_err(ctx->dev, "Timout for Purge Request%d\n", ret); 146 dev_err(ctx->dev, "Timeout for Purge Request%d\n", ret);
160 goto base_fw_load_failed; 147 goto base_fw_load_failed;
161 } 148 }
162 149
@@ -173,7 +160,7 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
173 160
174 /* Step 7: Wait for ROM init */ 161 /* Step 7: Wait for ROM init */
175 ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK, 162 ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
176 SKL_FW_INIT, BXT_INIT_TIMEOUT, "ROM Load"); 163 SKL_FW_INIT, BXT_ROM_INIT_TIMEOUT, "ROM Load");
177 if (ret < 0) { 164 if (ret < 0) {
178 dev_err(ctx->dev, "Timeout for ROM init, ret:%d\n", ret); 165 dev_err(ctx->dev, "Timeout for ROM init, ret:%d\n", ret);
179 goto base_fw_load_failed; 166 goto base_fw_load_failed;
@@ -206,18 +193,16 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
206{ 193{
207 struct firmware stripped_fw; 194 struct firmware stripped_fw;
208 struct skl_sst *skl = ctx->thread_context; 195 struct skl_sst *skl = ctx->thread_context;
209 int ret; 196 int ret, i;
210 197
211 ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev); 198 if (ctx->fw == NULL) {
212 if (ret < 0) { 199 ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
213 dev_err(ctx->dev, "Request firmware failed %d\n", ret); 200 if (ret < 0) {
214 goto sst_load_base_firmware_failed; 201 dev_err(ctx->dev, "Request firmware failed %d\n", ret);
202 return ret;
203 }
215 } 204 }
216 205
217 /* check for extended manifest */
218 if (ctx->fw == NULL)
219 goto sst_load_base_firmware_failed;
220
221 /* prase uuids on first boot */ 206 /* prase uuids on first boot */
222 if (skl->is_first_boot) { 207 if (skl->is_first_boot) {
223 ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0); 208 ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0);
@@ -229,18 +214,20 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
229 stripped_fw.size = ctx->fw->size; 214 stripped_fw.size = ctx->fw->size;
230 skl_dsp_strip_extended_manifest(&stripped_fw); 215 skl_dsp_strip_extended_manifest(&stripped_fw);
231 216
232 ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); 217
233 /* Retry Enabling core and ROM load. Retry seemed to help */ 218 for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
234 if (ret < 0) {
235 ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); 219 ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
236 if (ret < 0) { 220 if (ret == 0)
237 dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n", 221 break;
222 }
223
224 if (ret < 0) {
225 dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
238 sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE), 226 sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
239 sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS)); 227 sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
240 228
241 dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret); 229 dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
242 goto sst_load_base_firmware_failed; 230 goto sst_load_base_firmware_failed;
243 }
244 } 231 }
245 232
246 ret = sst_transfer_fw_host_dma(ctx); 233 ret = sst_transfer_fw_host_dma(ctx);
@@ -265,8 +252,11 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
265 } 252 }
266 } 253 }
267 254
255 return ret;
256
268sst_load_base_firmware_failed: 257sst_load_base_firmware_failed:
269 release_firmware(ctx->fw); 258 release_firmware(ctx->fw);
259 ctx->fw = NULL;
270 return ret; 260 return ret;
271} 261}
272 262
@@ -428,6 +418,7 @@ static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
428 return ret; 418 return ret;
429 } 419 }
430 } 420 }
421 skl->cores.state[core_id] = SKL_DSP_RUNNING;
431 return ret; 422 return ret;
432 } 423 }
433 424
@@ -514,11 +505,22 @@ static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
514 505
515 ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID, 506 ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
516 BXT_BASE_FW_MODULE_ID, &dx); 507 BXT_BASE_FW_MODULE_ID, &dx);
517 if (ret < 0) 508 if (ret < 0) {
518 dev_err(ctx->dev, 509 dev_err(ctx->dev,
519 "Failed to set DSP to D3:core id = %d;Continue reset\n", 510 "Failed to set DSP to D3:core id = %d;Continue reset\n",
520 core_id); 511 core_id);
512 /*
513 * In case of D3 failure, re-download the firmware, so set
514 * fw_loaded to false.
515 */
516 skl->fw_loaded = false;
517 }
521 518
519 if (core_id == SKL_DSP_CORE0_ID) {
520 /* disable Interrupt */
521 skl_ipc_op_int_disable(ctx);
522 skl_ipc_int_disable(ctx);
523 }
522 ret = skl_dsp_disable_core(ctx, core_mask); 524 ret = skl_dsp_disable_core(ctx, core_mask);
523 if (ret < 0) { 525 if (ret < 0) {
524 dev_err(ctx->dev, "Failed to disable core %d\n", ret); 526 dev_err(ctx->dev, "Failed to disable core %d\n", ret);
@@ -560,23 +562,14 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
560 struct sst_dsp *sst; 562 struct sst_dsp *sst;
561 int ret; 563 int ret;
562 564
563 skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL); 565 ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
564 if (skl == NULL) 566 if (ret < 0) {
565 return -ENOMEM; 567 dev_err(dev, "%s: no device\n", __func__);
566 568 return ret;
567 skl->dev = dev;
568 skl_dev.thread_context = skl;
569 INIT_LIST_HEAD(&skl->uuid_list);
570
571 skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
572 if (!skl->dsp) {
573 dev_err(skl->dev, "skl_dsp_ctx_init failed\n");
574 return -ENODEV;
575 } 569 }
576 570
571 skl = *dsp;
577 sst = skl->dsp; 572 sst = skl->dsp;
578 sst->fw_name = fw_name;
579 sst->dsp_ops = dsp_ops;
580 sst->fw_ops = bxt_fw_ops; 573 sst->fw_ops = bxt_fw_ops;
581 sst->addr.lpe = mmio_base; 574 sst->addr.lpe = mmio_base;
582 sst->addr.shim = mmio_base; 575 sst->addr.shim = mmio_base;
@@ -584,24 +577,15 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
584 sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), 577 sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
585 SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); 578 SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
586 579
587 INIT_LIST_HEAD(&sst->module_list);
588 ret = skl_ipc_init(dev, skl);
589 if (ret)
590 return ret;
591
592 /* set the D0i3 check */ 580 /* set the D0i3 check */
593 skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0; 581 skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;
594 582
595 skl->cores.count = 2; 583 skl->cores.count = 2;
596 skl->boot_complete = false; 584 skl->boot_complete = false;
597 init_waitqueue_head(&skl->boot_wait); 585 init_waitqueue_head(&skl->boot_wait);
598 skl->is_first_boot = true;
599 INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3); 586 INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
600 skl->d0i3.state = SKL_DSP_D0I3_NONE; 587 skl->d0i3.state = SKL_DSP_D0I3_NONE;
601 588
602 if (dsp)
603 *dsp = skl;
604
605 return 0; 589 return 0;
606} 590}
607EXPORT_SYMBOL_GPL(bxt_sst_dsp_init); 591EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
@@ -635,6 +619,10 @@ EXPORT_SYMBOL_GPL(bxt_sst_init_fw);
635 619
636void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx) 620void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
637{ 621{
622
623 skl_release_library(ctx->lib_info, ctx->lib_count);
624 if (ctx->dsp->fw)
625 release_firmware(ctx->dsp->fw);
638 skl_freeup_uuid_list(ctx); 626 skl_freeup_uuid_list(ctx);
639 skl_ipc_free(&ctx->ipc); 627 skl_ipc_free(&ctx->ipc);
640 ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp); 628 ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index e66870474f10..ab1adc0c9cc3 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -58,7 +58,7 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
58#define NOTIFICATION_MASK 0xf 58#define NOTIFICATION_MASK 0xf
59 59
60/* disable notfication for underruns/overruns from firmware module */ 60/* disable notfication for underruns/overruns from firmware module */
61static void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable) 61void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
62{ 62{
63 struct notification_mask mask; 63 struct notification_mask mask;
64 struct skl_ipc_large_config_msg msg = {0}; 64 struct skl_ipc_large_config_msg msg = {0};
@@ -209,7 +209,7 @@ static const struct skl_dsp_ops dsp_ops[] = {
209 { 209 {
210 .id = 0x9d71, 210 .id = 0x9d71,
211 .loader_ops = skl_get_loader_ops, 211 .loader_ops = skl_get_loader_ops,
212 .init = skl_sst_dsp_init, 212 .init = kbl_sst_dsp_init,
213 .init_fw = skl_sst_init_fw, 213 .init_fw = skl_sst_init_fw,
214 .cleanup = skl_sst_dsp_cleanup 214 .cleanup = skl_sst_dsp_cleanup
215 }, 215 },
@@ -274,6 +274,7 @@ int skl_init_dsp(struct skl *skl)
274 if (ret < 0) 274 if (ret < 0)
275 return ret; 275 return ret;
276 276
277 skl->skl_sst->dsp_ops = ops;
277 dev_dbg(bus->dev, "dsp registration status=%d\n", ret); 278 dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
278 279
279 return ret; 280 return ret;
@@ -284,16 +285,11 @@ int skl_free_dsp(struct skl *skl)
284 struct hdac_ext_bus *ebus = &skl->ebus; 285 struct hdac_ext_bus *ebus = &skl->ebus;
285 struct hdac_bus *bus = ebus_to_hbus(ebus); 286 struct hdac_bus *bus = ebus_to_hbus(ebus);
286 struct skl_sst *ctx = skl->skl_sst; 287 struct skl_sst *ctx = skl->skl_sst;
287 const struct skl_dsp_ops *ops;
288 288
289 /* disable ppcap interrupt */ 289 /* disable ppcap interrupt */
290 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false); 290 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
291 291
292 ops = skl_get_dsp_ops(skl->pci->device); 292 ctx->dsp_ops->cleanup(bus->dev, ctx);
293 if (!ops)
294 return -EIO;
295
296 ops->cleanup(bus->dev, ctx);
297 293
298 if (ctx->dsp->addr.lpe) 294 if (ctx->dsp->addr.lpe)
299 iounmap(ctx->dsp->addr.lpe); 295 iounmap(ctx->dsp->addr.lpe);
@@ -866,7 +862,7 @@ static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
866 } 862 }
867 863
868 if (!found) 864 if (!found)
869 mcfg->m_state = SKL_MODULE_UNINIT; 865 mcfg->m_state = SKL_MODULE_INIT_DONE;
870 return; 866 return;
871} 867}
872 868
@@ -1098,7 +1094,7 @@ int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1098 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id); 1094 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1099 1095
1100 /* If pipe is started, do stop the pipe in FW. */ 1096 /* If pipe is started, do stop the pipe in FW. */
1101 if (pipe->state > SKL_PIPE_STARTED) { 1097 if (pipe->state >= SKL_PIPE_STARTED) {
1102 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED); 1098 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1103 if (ret < 0) { 1099 if (ret < 0) {
1104 dev_err(ctx->dev, "Failed to stop pipeline\n"); 1100 dev_err(ctx->dev, "Failed to stop pipeline\n");
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 7eb9c419dc7f..e3f06672fd6d 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -24,8 +24,6 @@
24static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45, 24static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
25 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53}; 25 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
26 26
27#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
28
29struct nhlt_acpi_table *skl_nhlt_init(struct device *dev) 27struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
30{ 28{
31 acpi_handle handle; 29 acpi_handle handle;
@@ -33,8 +31,9 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
33 struct nhlt_resource_desc *nhlt_ptr = NULL; 31 struct nhlt_resource_desc *nhlt_ptr = NULL;
34 struct nhlt_acpi_table *nhlt_table = NULL; 32 struct nhlt_acpi_table *nhlt_table = NULL;
35 33
36 if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) { 34 handle = ACPI_HANDLE(dev);
37 dev_err(dev, "Requested NHLT device not found\n"); 35 if (!handle) {
36 dev_err(dev, "Didn't find ACPI_HANDLE\n");
38 return NULL; 37 return NULL;
39 } 38 }
40 39
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index e12520e142ff..e91bbcffc856 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/delay.h>
24#include <sound/pcm_params.h> 25#include <sound/pcm_params.h>
25#include <sound/soc.h> 26#include <sound/soc.h>
26#include "skl.h" 27#include "skl.h"
@@ -155,7 +156,7 @@ int skl_pcm_host_dma_prepare(struct device *dev, struct skl_pipe_params *params)
155 snd_hdac_ext_stream_decouple(ebus, stream, true); 156 snd_hdac_ext_stream_decouple(ebus, stream, true);
156 157
157 format_val = snd_hdac_calc_stream_format(params->s_freq, 158 format_val = snd_hdac_calc_stream_format(params->s_freq,
158 params->ch, params->format, 32, 0); 159 params->ch, params->format, params->host_bps, 0);
159 160
160 dev_dbg(dev, "format_val=%d, rate=%d, ch=%d, format=%d\n", 161 dev_dbg(dev, "format_val=%d, rate=%d, ch=%d, format=%d\n",
161 format_val, params->s_freq, params->ch, params->format); 162 format_val, params->s_freq, params->ch, params->format);
@@ -190,8 +191,8 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
190 191
191 stream = stream_to_hdac_ext_stream(hstream); 192 stream = stream_to_hdac_ext_stream(hstream);
192 snd_hdac_ext_stream_decouple(ebus, stream, true); 193 snd_hdac_ext_stream_decouple(ebus, stream, true);
193 format_val = snd_hdac_calc_stream_format(params->s_freq, 194 format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
194 params->ch, params->format, 24, 0); 195 params->format, params->link_bps, 0);
195 196
196 dev_dbg(dev, "format_val=%d, rate=%d, ch=%d, format=%d\n", 197 dev_dbg(dev, "format_val=%d, rate=%d, ch=%d, format=%d\n",
197 format_val, params->s_freq, params->ch, params->format); 198 format_val, params->s_freq, params->ch, params->format);
@@ -262,23 +263,6 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
262 return 0; 263 return 0;
263} 264}
264 265
265static int skl_be_prepare(struct snd_pcm_substream *substream,
266 struct snd_soc_dai *dai)
267{
268 struct skl *skl = get_skl_ctx(dai->dev);
269 struct skl_sst *ctx = skl->skl_sst;
270 struct skl_module_cfg *mconfig;
271
272 if (dai->playback_widget->power || dai->capture_widget->power)
273 return 0;
274
275 mconfig = skl_tplg_be_get_cpr_module(dai, substream->stream);
276 if (mconfig == NULL)
277 return -EINVAL;
278
279 return skl_dsp_set_dma_control(ctx, mconfig);
280}
281
282static int skl_pcm_prepare(struct snd_pcm_substream *substream, 266static int skl_pcm_prepare(struct snd_pcm_substream *substream,
283 struct snd_soc_dai *dai) 267 struct snd_soc_dai *dai)
284{ 268{
@@ -326,6 +310,11 @@ static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
326 p_params.host_dma_id = dma_id; 310 p_params.host_dma_id = dma_id;
327 p_params.stream = substream->stream; 311 p_params.stream = substream->stream;
328 p_params.format = params_format(params); 312 p_params.format = params_format(params);
313 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
314 p_params.host_bps = dai->driver->playback.sig_bits;
315 else
316 p_params.host_bps = dai->driver->capture.sig_bits;
317
329 318
330 m_cfg = skl_tplg_fe_get_cpr_module(dai, p_params.stream); 319 m_cfg = skl_tplg_fe_get_cpr_module(dai, p_params.stream);
331 if (m_cfg) 320 if (m_cfg)
@@ -564,6 +553,11 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
564 p_params.link_index = link->index; 553 p_params.link_index = link->index;
565 p_params.format = params_format(params); 554 p_params.format = params_format(params);
566 555
556 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
557 p_params.link_bps = codec_dai->driver->playback.sig_bits;
558 else
559 p_params.link_bps = codec_dai->driver->capture.sig_bits;
560
567 return skl_tplg_be_update_params(dai, &p_params); 561 return skl_tplg_be_update_params(dai, &p_params);
568} 562}
569 563
@@ -649,7 +643,6 @@ static struct snd_soc_dai_ops skl_dmic_dai_ops = {
649 643
650static struct snd_soc_dai_ops skl_be_ssp_dai_ops = { 644static struct snd_soc_dai_ops skl_be_ssp_dai_ops = {
651 .hw_params = skl_be_hw_params, 645 .hw_params = skl_be_hw_params,
652 .prepare = skl_be_prepare,
653}; 646};
654 647
655static struct snd_soc_dai_ops skl_link_dai_ops = { 648static struct snd_soc_dai_ops skl_link_dai_ops = {
@@ -670,6 +663,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
670 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000, 663 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
671 .formats = SNDRV_PCM_FMTBIT_S16_LE | 664 .formats = SNDRV_PCM_FMTBIT_S16_LE |
672 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, 665 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
666 .sig_bits = 32,
673 }, 667 },
674 .capture = { 668 .capture = {
675 .stream_name = "System Capture", 669 .stream_name = "System Capture",
@@ -677,6 +671,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
677 .channels_max = HDA_STEREO, 671 .channels_max = HDA_STEREO,
678 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000, 672 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
679 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, 673 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
674 .sig_bits = 32,
680 }, 675 },
681}, 676},
682{ 677{
@@ -688,6 +683,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
688 .channels_max = HDA_QUAD, 683 .channels_max = HDA_QUAD,
689 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000, 684 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
690 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, 685 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
686 .sig_bits = 32,
691 }, 687 },
692}, 688},
693{ 689{
@@ -699,6 +695,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
699 .channels_max = HDA_STEREO, 695 .channels_max = HDA_STEREO,
700 .rates = SNDRV_PCM_RATE_48000, 696 .rates = SNDRV_PCM_RATE_48000,
701 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, 697 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
698 .sig_bits = 32,
702 }, 699 },
703}, 700},
704{ 701{
@@ -710,6 +707,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
710 .channels_max = HDA_STEREO, 707 .channels_max = HDA_STEREO,
711 .rates = SNDRV_PCM_RATE_48000, 708 .rates = SNDRV_PCM_RATE_48000,
712 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, 709 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
710 .sig_bits = 32,
713 }, 711 },
714}, 712},
715{ 713{
@@ -721,6 +719,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
721 .channels_max = HDA_QUAD, 719 .channels_max = HDA_QUAD,
722 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000, 720 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
723 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, 721 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
722 .sig_bits = 32,
724 }, 723 },
725}, 724},
726{ 725{
@@ -736,6 +735,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
736 SNDRV_PCM_RATE_192000, 735 SNDRV_PCM_RATE_192000,
737 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | 736 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
738 SNDRV_PCM_FMTBIT_S32_LE, 737 SNDRV_PCM_FMTBIT_S32_LE,
738 .sig_bits = 32,
739 }, 739 },
740}, 740},
741{ 741{
@@ -751,6 +751,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
751 SNDRV_PCM_RATE_192000, 751 SNDRV_PCM_RATE_192000,
752 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | 752 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
753 SNDRV_PCM_FMTBIT_S32_LE, 753 SNDRV_PCM_FMTBIT_S32_LE,
754 .sig_bits = 32,
754 }, 755 },
755}, 756},
756{ 757{
@@ -766,6 +767,7 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
766 SNDRV_PCM_RATE_192000, 767 SNDRV_PCM_RATE_192000,
767 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | 768 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
768 SNDRV_PCM_FMTBIT_S32_LE, 769 SNDRV_PCM_FMTBIT_S32_LE,
770 .sig_bits = 32,
769 }, 771 },
770}, 772},
771 773
@@ -949,14 +951,12 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
949 951
950static int skl_platform_open(struct snd_pcm_substream *substream) 952static int skl_platform_open(struct snd_pcm_substream *substream)
951{ 953{
952 struct snd_pcm_runtime *runtime;
953 struct snd_soc_pcm_runtime *rtd = substream->private_data; 954 struct snd_soc_pcm_runtime *rtd = substream->private_data;
954 struct snd_soc_dai_link *dai_link = rtd->dai_link; 955 struct snd_soc_dai_link *dai_link = rtd->dai_link;
955 956
956 dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__, 957 dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
957 dai_link->cpu_dai_name); 958 dai_link->cpu_dai_name);
958 959
959 runtime = substream->runtime;
960 snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw); 960 snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
961 961
962 return 0; 962 return 0;
@@ -1062,13 +1062,31 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
1062 * HAD space reflects the actual data that is transferred. 1062 * HAD space reflects the actual data that is transferred.
1063 * Use the position buffer for capture, as DPIB write gets 1063 * Use the position buffer for capture, as DPIB write gets
1064 * completed earlier than the actual data written to the DDR. 1064 * completed earlier than the actual data written to the DDR.
1065 *
1066 * For capture stream following workaround is required to fix the
1067 * incorrect position reporting.
1068 *
1069 * 1. Wait for 20us before reading the DMA position in buffer once
1070 * the interrupt is generated for stream completion as update happens
1071 * on the HDA frame boundary i.e. 20.833uSec.
1072 * 2. Read DPIB register to flush the DMA position value. This dummy
1073 * read is required to flush DMA position value.
1074 * 3. Read the DMA Position-in-Buffer. This value now will be equal to
1075 * or greater than period boundary.
1065 */ 1076 */
1066 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 1077
1078 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1067 pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE + 1079 pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
1068 (AZX_REG_VS_SDXDPIB_XINTERVAL * 1080 (AZX_REG_VS_SDXDPIB_XINTERVAL *
1069 hdac_stream(hstream)->index)); 1081 hdac_stream(hstream)->index));
1070 else 1082 } else {
1083 udelay(20);
1084 readl(ebus->bus.remap_addr +
1085 AZX_REG_VS_SDXDPIB_XBASE +
1086 (AZX_REG_VS_SDXDPIB_XINTERVAL *
1087 hdac_stream(hstream)->index));
1071 pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream)); 1088 pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
1089 }
1072 1090
1073 if (pos >= hdac_stream(hstream)->bufsize) 1091 if (pos >= hdac_stream(hstream)->bufsize)
1074 pos = 0; 1092 pos = 0;
@@ -1165,7 +1183,7 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
1165 snd_dma_pci_data(skl->pci), 1183 snd_dma_pci_data(skl->pci),
1166 size, MAX_PREALLOC_SIZE); 1184 size, MAX_PREALLOC_SIZE);
1167 if (retval) { 1185 if (retval) {
1168 dev_err(dai->dev, "dma buffer allocationf fail\n"); 1186 dev_err(dai->dev, "dma buffer allocation fail\n");
1169 return retval; 1187 return retval;
1170 } 1188 }
1171 } 1189 }
@@ -1173,29 +1191,52 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
1173 return retval; 1191 return retval;
1174} 1192}
1175 1193
1194static int skl_get_module_info(struct skl *skl, struct skl_module_cfg *mconfig)
1195{
1196 struct skl_sst *ctx = skl->skl_sst;
1197 struct uuid_module *module;
1198 uuid_le *uuid_mod;
1199
1200 uuid_mod = (uuid_le *)mconfig->guid;
1201
1202 if (list_empty(&ctx->uuid_list)) {
1203 dev_err(ctx->dev, "Module list is empty\n");
1204 return -EIO;
1205 }
1206
1207 list_for_each_entry(module, &ctx->uuid_list, list) {
1208 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
1209 mconfig->id.module_id = module->id;
1210 mconfig->is_loadable = module->is_loadable;
1211 return 0;
1212 }
1213 }
1214
1215 return -EIO;
1216}
1217
1176static int skl_populate_modules(struct skl *skl) 1218static int skl_populate_modules(struct skl *skl)
1177{ 1219{
1178 struct skl_pipeline *p; 1220 struct skl_pipeline *p;
1179 struct skl_pipe_module *m; 1221 struct skl_pipe_module *m;
1180 struct snd_soc_dapm_widget *w; 1222 struct snd_soc_dapm_widget *w;
1181 struct skl_module_cfg *mconfig; 1223 struct skl_module_cfg *mconfig;
1182 int ret; 1224 int ret = 0;
1183 1225
1184 list_for_each_entry(p, &skl->ppl_list, node) { 1226 list_for_each_entry(p, &skl->ppl_list, node) {
1185 list_for_each_entry(m, &p->pipe->w_list, node) { 1227 list_for_each_entry(m, &p->pipe->w_list, node) {
1186
1187 w = m->w; 1228 w = m->w;
1188 mconfig = w->priv; 1229 mconfig = w->priv;
1189 1230
1190 ret = snd_skl_get_module_info(skl->skl_sst, mconfig); 1231 ret = skl_get_module_info(skl, mconfig);
1191 if (ret < 0) { 1232 if (ret < 0) {
1192 dev_err(skl->skl_sst->dev, 1233 dev_err(skl->skl_sst->dev,
1193 "query module info failed:%d\n", ret); 1234 "query module info failed\n");
1194 goto err; 1235 return ret;
1195 } 1236 }
1196 } 1237 }
1197 } 1238 }
1198err: 1239
1199 return ret; 1240 return ret;
1200} 1241}
1201 1242
@@ -1232,6 +1273,7 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
1232 } 1273 }
1233 skl_populate_modules(skl); 1274 skl_populate_modules(skl);
1234 skl->skl_sst->update_d0i3c = skl_update_d0i3c; 1275 skl->skl_sst->update_d0i3c = skl_update_d0i3c;
1276 skl_dsp_enable_notification(skl->skl_sst, false);
1235 } 1277 }
1236 pm_runtime_mark_last_busy(platform->dev); 1278 pm_runtime_mark_last_busy(platform->dev);
1237 pm_runtime_put_autosuspend(platform->dev); 1279 pm_runtime_put_autosuspend(platform->dev);
@@ -1256,6 +1298,7 @@ int skl_platform_register(struct device *dev)
1256 struct skl *skl = ebus_to_skl(ebus); 1298 struct skl *skl = ebus_to_skl(ebus);
1257 1299
1258 INIT_LIST_HEAD(&skl->ppl_list); 1300 INIT_LIST_HEAD(&skl->ppl_list);
1301 INIT_LIST_HEAD(&skl->bind_list);
1259 1302
1260 ret = snd_soc_register_platform(dev, &skl_platform_drv); 1303 ret = snd_soc_register_platform(dev, &skl_platform_drv);
1261 if (ret) { 1304 if (ret) {
@@ -1276,6 +1319,17 @@ int skl_platform_register(struct device *dev)
1276 1319
1277int skl_platform_unregister(struct device *dev) 1320int skl_platform_unregister(struct device *dev)
1278{ 1321{
1322 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
1323 struct skl *skl = ebus_to_skl(ebus);
1324 struct skl_module_deferred_bind *modules, *tmp;
1325
1326 if (!list_empty(&skl->bind_list)) {
1327 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1328 list_del(&modules->node);
1329 kfree(modules);
1330 }
1331 }
1332
1279 snd_soc_unregister_component(dev); 1333 snd_soc_unregister_component(dev);
1280 snd_soc_unregister_platform(dev); 1334 snd_soc_unregister_platform(dev);
1281 return 0; 1335 return 0;
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index c9f6d87381db..d2b1d60fec02 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -164,7 +164,7 @@ static void skl_cldma_cleanup(struct sst_dsp *ctx)
164 ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl); 164 ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
165} 165}
166 166
167static int skl_cldma_wait_interruptible(struct sst_dsp *ctx) 167int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
168{ 168{
169 int ret = 0; 169 int ret = 0;
170 170
@@ -243,9 +243,14 @@ static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
243 * 2. Polling on fw register to identify if data left to transferred doesn't 243 * 2. Polling on fw register to identify if data left to transferred doesn't
244 * fill the ring buffer. Caller takes care of polling the required status 244 * fill the ring buffer. Caller takes care of polling the required status
245 * register to identify the transfer status. 245 * register to identify the transfer status.
 246 * 3. if wait flag is set, waits for BDL interrupt to copy the next chunk till
247 * bytes_left is 0.
 248 * if wait flag is not set, doesn't wait for BDL interrupt. after copying
 249 * the first chunk return the number of bytes_left to be copied.
246 */ 250 */
247static int 251static int
248skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size) 252skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin,
253 u32 total_size, bool wait)
249{ 254{
250 int ret = 0; 255 int ret = 0;
251 bool start = true; 256 bool start = true;
@@ -272,13 +277,14 @@ skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
272 size = ctx->cl_dev.bufsize; 277 size = ctx->cl_dev.bufsize;
273 skl_cldma_fill_buffer(ctx, size, curr_pos, true, start); 278 skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
274 279
275 start = false; 280 if (wait) {
276 ret = skl_cldma_wait_interruptible(ctx); 281 start = false;
277 if (ret < 0) { 282 ret = skl_cldma_wait_interruptible(ctx);
278 skl_cldma_stop(ctx); 283 if (ret < 0) {
279 return ret; 284 skl_cldma_stop(ctx);
285 return ret;
286 }
280 } 287 }
281
282 } else { 288 } else {
283 skl_cldma_int_disable(ctx); 289 skl_cldma_int_disable(ctx);
284 290
@@ -298,9 +304,11 @@ skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
298 } 304 }
299 bytes_left -= size; 305 bytes_left -= size;
300 curr_pos = curr_pos + size; 306 curr_pos = curr_pos + size;
307 if (!wait)
308 return bytes_left;
301 } 309 }
302 310
303 return ret; 311 return bytes_left;
304} 312}
305 313
306void skl_cldma_process_intr(struct sst_dsp *ctx) 314void skl_cldma_process_intr(struct sst_dsp *ctx)
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
index 99e4c86b6358..5b730a1a0ae4 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.h
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -213,7 +213,7 @@ struct skl_cl_dev_ops {
213 void (*cl_trigger)(struct sst_dsp *ctx, bool enable); 213 void (*cl_trigger)(struct sst_dsp *ctx, bool enable);
214 void (*cl_cleanup_controller)(struct sst_dsp *ctx); 214 void (*cl_cleanup_controller)(struct sst_dsp *ctx);
215 int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx, 215 int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
216 const void *bin, u32 size); 216 const void *bin, u32 size, bool wait);
217 void (*cl_stop_dma)(struct sst_dsp *ctx); 217 void (*cl_stop_dma)(struct sst_dsp *ctx);
218}; 218};
219 219
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index c3deefab65d6..08332723c700 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -355,12 +355,13 @@ int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
355 ret = ctx->fw_ops.set_state_D0(ctx, core_id); 355 ret = ctx->fw_ops.set_state_D0(ctx, core_id);
356 if (ret < 0) { 356 if (ret < 0) {
357 dev_err(ctx->dev, "unable to get core%d\n", core_id); 357 dev_err(ctx->dev, "unable to get core%d\n", core_id);
358 return ret; 358 goto out;
359 } 359 }
360 } 360 }
361 361
362 skl->cores.usage_count[core_id]++; 362 skl->cores.usage_count[core_id]++;
363 363
364out:
364 dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n", 365 dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
365 core_id, skl->cores.state[core_id], 366 core_id, skl->cores.state[core_id],
366 skl->cores.usage_count[core_id]); 367 skl->cores.usage_count[core_id]);
@@ -379,7 +380,8 @@ int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
379 return -EINVAL; 380 return -EINVAL;
380 } 381 }
381 382
382 if (--skl->cores.usage_count[core_id] == 0) { 383 if ((--skl->cores.usage_count[core_id] == 0) &&
384 (skl->cores.state[core_id] != SKL_DSP_RESET)) {
383 ret = ctx->fw_ops.set_state_D3(ctx, core_id); 385 ret = ctx->fw_ops.set_state_D3(ctx, core_id);
384 if (ret < 0) { 386 if (ret < 0) {
385 dev_err(ctx->dev, "unable to put core %d: %d\n", 387 dev_err(ctx->dev, "unable to put core %d: %d\n",
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index 849410d0823e..eba20d37ba8c 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -17,13 +17,15 @@
17#define __SKL_SST_DSP_H__ 17#define __SKL_SST_DSP_H__
18 18
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/uuid.h>
21#include <linux/firmware.h>
20#include <sound/memalloc.h> 22#include <sound/memalloc.h>
21#include "skl-sst-cldma.h" 23#include "skl-sst-cldma.h"
22#include "skl-topology.h"
23 24
24struct sst_dsp; 25struct sst_dsp;
25struct skl_sst; 26struct skl_sst;
26struct sst_dsp_device; 27struct sst_dsp_device;
28struct skl_lib_info;
27 29
28/* Intel HD Audio General DSP Registers */ 30/* Intel HD Audio General DSP Registers */
29#define SKL_ADSP_GEN_BASE 0x0 31#define SKL_ADSP_GEN_BASE 0x0
@@ -144,7 +146,7 @@ struct skl_dsp_fw_ops {
144 int (*load_fw)(struct sst_dsp *ctx); 146 int (*load_fw)(struct sst_dsp *ctx);
145 /* FW module parser/loader */ 147 /* FW module parser/loader */
146 int (*load_library)(struct sst_dsp *ctx, 148 int (*load_library)(struct sst_dsp *ctx,
147 struct skl_lib_info *linfo, int count); 149 struct skl_lib_info *linfo, int lib_count);
148 int (*parse_fw)(struct sst_dsp *ctx); 150 int (*parse_fw)(struct sst_dsp *ctx);
149 int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id); 151 int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
150 int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id); 152 int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
@@ -172,6 +174,19 @@ struct skl_dsp_loader_ops {
172 int stream_tag); 174 int stream_tag);
173}; 175};
174 176
177#define MAX_INSTANCE_BUFF 2
178
179struct uuid_module {
180 uuid_le uuid;
181 int id;
182 int is_loadable;
183 int max_instance;
184 u64 pvt_id[MAX_INSTANCE_BUFF];
185 int *instance_id;
186
187 struct list_head list;
188};
189
175struct skl_load_module_info { 190struct skl_load_module_info {
176 u16 mod_id; 191 u16 mod_id;
177 const struct firmware *fw; 192 const struct firmware *fw;
@@ -186,6 +201,7 @@ struct skl_module_table {
186void skl_cldma_process_intr(struct sst_dsp *ctx); 201void skl_cldma_process_intr(struct sst_dsp *ctx);
187void skl_cldma_int_disable(struct sst_dsp *ctx); 202void skl_cldma_int_disable(struct sst_dsp *ctx);
188int skl_cldma_prepare(struct sst_dsp *ctx); 203int skl_cldma_prepare(struct sst_dsp *ctx);
204int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
189 205
190void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state); 206void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
191struct sst_dsp *skl_dsp_ctx_init(struct device *dev, 207struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
@@ -214,6 +230,9 @@ int skl_dsp_boot(struct sst_dsp *ctx);
214int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, 230int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
215 const char *fw_name, struct skl_dsp_loader_ops dsp_ops, 231 const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
216 struct skl_sst **dsp); 232 struct skl_sst **dsp);
233int kbl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
234 const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
235 struct skl_sst **dsp);
217int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, 236int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
218 const char *fw_name, struct skl_dsp_loader_ops dsp_ops, 237 const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
219 struct skl_sst **dsp); 238 struct skl_sst **dsp);
@@ -222,17 +241,22 @@ int bxt_sst_init_fw(struct device *dev, struct skl_sst *ctx);
222void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx); 241void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
223void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx); 242void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
224 243
225int snd_skl_get_module_info(struct skl_sst *ctx,
226 struct skl_module_cfg *mconfig);
227int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, 244int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
228 unsigned int offset, int index); 245 unsigned int offset, int index);
229int skl_get_pvt_id(struct skl_sst *ctx, 246int skl_get_pvt_id(struct skl_sst *ctx, uuid_le *uuid_mod, int instance_id);
230 struct skl_module_cfg *mconfig); 247int skl_put_pvt_id(struct skl_sst *ctx, uuid_le *uuid_mod, int *pvt_id);
231int skl_put_pvt_id(struct skl_sst *ctx,
232 struct skl_module_cfg *mconfig);
233int skl_get_pvt_instance_id_map(struct skl_sst *ctx, 248int skl_get_pvt_instance_id_map(struct skl_sst *ctx,
234 int module_id, int instance_id); 249 int module_id, int instance_id);
235void skl_freeup_uuid_list(struct skl_sst *ctx); 250void skl_freeup_uuid_list(struct skl_sst *ctx);
236 251
237int skl_dsp_strip_extended_manifest(struct firmware *fw); 252int skl_dsp_strip_extended_manifest(struct firmware *fw);
253void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable);
254int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
255 struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
256 struct sst_dsp_device *skl_dev);
257int skl_prepare_lib_load(struct skl_sst *skl, struct skl_lib_info *linfo,
258 struct firmware *stripped_fw,
259 unsigned int hdr_offset, int index);
260void skl_release_library(struct skl_lib_info *linfo, int lib_count);
261
238#endif /*__SKL_SST_DSP_H__*/ 262#endif /*__SKL_SST_DSP_H__*/
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index e1391dfbc9e9..58c525096a7c 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -34,6 +34,11 @@
34#define IPC_GLB_REPLY_STATUS_MASK ((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1) 34#define IPC_GLB_REPLY_STATUS_MASK ((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
35#define IPC_GLB_REPLY_STATUS(x) ((x) << IPC_GLB_REPLY_STATUS_SHIFT) 35#define IPC_GLB_REPLY_STATUS(x) ((x) << IPC_GLB_REPLY_STATUS_SHIFT)
36 36
37#define IPC_GLB_REPLY_TYPE_SHIFT 29
38#define IPC_GLB_REPLY_TYPE_MASK 0x1F
39#define IPC_GLB_REPLY_TYPE(x) (((x) >> IPC_GLB_REPLY_TYPE_SHIFT) \
 40 & IPC_GLB_REPLY_TYPE_MASK)
41
37#define IPC_TIMEOUT_MSECS 3000 42#define IPC_TIMEOUT_MSECS 3000
38 43
39#define IPC_EMPTY_LIST_SIZE 8 44#define IPC_EMPTY_LIST_SIZE 8
@@ -387,12 +392,27 @@ static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
387 return 0; 392 return 0;
388} 393}
389 394
395static int skl_ipc_set_reply_error_code(u32 reply)
396{
397 switch (reply) {
398 case IPC_GLB_REPLY_OUT_OF_MEMORY:
399 return -ENOMEM;
400
401 case IPC_GLB_REPLY_BUSY:
402 return -EBUSY;
403
404 default:
405 return -EINVAL;
406 }
407}
408
390static void skl_ipc_process_reply(struct sst_generic_ipc *ipc, 409static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
391 struct skl_ipc_header header) 410 struct skl_ipc_header header)
392{ 411{
393 struct ipc_message *msg; 412 struct ipc_message *msg;
394 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK; 413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
395 u64 *ipc_header = (u64 *)(&header); 414 u64 *ipc_header = (u64 *)(&header);
415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
396 416
397 msg = skl_ipc_reply_get_msg(ipc, *ipc_header); 417 msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
398 if (msg == NULL) { 418 if (msg == NULL) {
@@ -401,33 +421,39 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
401 } 421 }
402 422
403 /* first process the header */ 423 /* first process the header */
404 switch (reply) { 424 if (reply == IPC_GLB_REPLY_SUCCESS) {
405 case IPC_GLB_REPLY_SUCCESS:
406 dev_dbg(ipc->dev, "ipc FW reply %x: success\n", header.primary); 425 dev_dbg(ipc->dev, "ipc FW reply %x: success\n", header.primary);
407 /* copy the rx data from the mailbox */ 426 /* copy the rx data from the mailbox */
408 sst_dsp_inbox_read(ipc->dsp, msg->rx_data, msg->rx_size); 427 sst_dsp_inbox_read(ipc->dsp, msg->rx_data, msg->rx_size);
409 break; 428 switch (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
410 429 case IPC_GLB_LOAD_MULTIPLE_MODS:
411 case IPC_GLB_REPLY_OUT_OF_MEMORY: 430 case IPC_GLB_LOAD_LIBRARY:
412 dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary); 431 skl->mod_load_complete = true;
413 msg->errno = -ENOMEM; 432 skl->mod_load_status = true;
414 break; 433 wake_up(&skl->mod_load_wait);
415 434 break;
416 case IPC_GLB_REPLY_BUSY:
417 dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
418 msg->errno = -EBUSY;
419 break;
420 435
421 default: 436 default:
422 dev_err(ipc->dev, "Unknown ipc reply: 0x%x\n", reply); 437 break;
423 msg->errno = -EINVAL;
424 break;
425 }
426 438
427 if (reply != IPC_GLB_REPLY_SUCCESS) { 439 }
440 } else {
441 msg->errno = skl_ipc_set_reply_error_code(reply);
428 dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply); 442 dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply);
429 dev_err(ipc->dev, "FW Error Code: %u\n", 443 dev_err(ipc->dev, "FW Error Code: %u\n",
430 ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp)); 444 ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
445 switch (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
446 case IPC_GLB_LOAD_MULTIPLE_MODS:
447 case IPC_GLB_LOAD_LIBRARY:
448 skl->mod_load_complete = true;
449 skl->mod_load_status = false;
450 wake_up(&skl->mod_load_wait);
451 break;
452
453 default:
454 break;
455
456 }
431 } 457 }
432 458
433 list_del(&msg->list); 459 list_del(&msg->list);
@@ -811,8 +837,8 @@ int skl_ipc_load_modules(struct sst_generic_ipc *ipc,
811 header.primary |= IPC_GLB_TYPE(IPC_GLB_LOAD_MULTIPLE_MODS); 837 header.primary |= IPC_GLB_TYPE(IPC_GLB_LOAD_MULTIPLE_MODS);
812 header.primary |= IPC_LOAD_MODULE_CNT(module_cnt); 838 header.primary |= IPC_LOAD_MODULE_CNT(module_cnt);
813 839
814 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, data, 840 ret = sst_ipc_tx_message_nowait(ipc, *ipc_header, data,
815 (sizeof(u16) * module_cnt), NULL, 0); 841 (sizeof(u16) * module_cnt));
816 if (ret < 0) 842 if (ret < 0)
817 dev_err(ipc->dev, "ipc: load modules failed :%d\n", ret); 843 dev_err(ipc->dev, "ipc: load modules failed :%d\n", ret);
818 844
@@ -947,7 +973,7 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
947EXPORT_SYMBOL_GPL(skl_ipc_get_large_config); 973EXPORT_SYMBOL_GPL(skl_ipc_get_large_config);
948 974
949int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc, 975int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
950 u8 dma_id, u8 table_id) 976 u8 dma_id, u8 table_id, bool wait)
951{ 977{
952 struct skl_ipc_header header = {0}; 978 struct skl_ipc_header header = {0};
953 u64 *ipc_header = (u64 *)(&header); 979 u64 *ipc_header = (u64 *)(&header);
@@ -959,7 +985,11 @@ int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
959 header.primary |= IPC_MOD_INSTANCE_ID(table_id); 985 header.primary |= IPC_MOD_INSTANCE_ID(table_id);
960 header.primary |= IPC_MOD_ID(dma_id); 986 header.primary |= IPC_MOD_ID(dma_id);
961 987
962 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0); 988 if (wait)
989 ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
990 NULL, 0, NULL, 0);
991 else
992 ret = sst_ipc_tx_message_nowait(ipc, *ipc_header, NULL, 0);
963 993
964 if (ret < 0) 994 if (ret < 0)
965 dev_err(ipc->dev, "ipc: load lib failed\n"); 995 dev_err(ipc->dev, "ipc: load lib failed\n");
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
index 9660ace379ab..e057da2713c6 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.h
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -69,6 +69,14 @@ struct skl_d0i3_data {
69 struct delayed_work work; 69 struct delayed_work work;
70}; 70};
71 71
72#define SKL_LIB_NAME_LENGTH 128
73#define SKL_MAX_LIB 16
74
75struct skl_lib_info {
76 char name[SKL_LIB_NAME_LENGTH];
77 const struct firmware *fw;
78};
79
72struct skl_sst { 80struct skl_sst {
73 struct device *dev; 81 struct device *dev;
74 struct sst_dsp *dsp; 82 struct sst_dsp *dsp;
@@ -77,6 +85,11 @@ struct skl_sst {
77 wait_queue_head_t boot_wait; 85 wait_queue_head_t boot_wait;
78 bool boot_complete; 86 bool boot_complete;
79 87
88 /* module load */
89 wait_queue_head_t mod_load_wait;
90 bool mod_load_complete;
91 bool mod_load_status;
92
80 /* IPC messaging */ 93 /* IPC messaging */
81 struct sst_generic_ipc ipc; 94 struct sst_generic_ipc ipc;
82 95
@@ -105,6 +118,8 @@ struct skl_sst {
105 void (*update_d0i3c)(struct device *dev, bool enable); 118 void (*update_d0i3c)(struct device *dev, bool enable);
106 119
107 struct skl_d0i3_data d0i3; 120 struct skl_d0i3_data d0i3;
121
122 const struct skl_dsp_ops *dsp_ops;
108}; 123};
109 124
110struct skl_ipc_init_instance_msg { 125struct skl_ipc_init_instance_msg {
@@ -182,7 +197,7 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
182 struct skl_ipc_large_config_msg *msg, u32 *param); 197 struct skl_ipc_large_config_msg *msg, u32 *param);
183 198
184int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc, 199int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
185 u8 dma_id, u8 table_id); 200 u8 dma_id, u8 table_id, bool wait);
186 201
187int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, 202int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc,
188 struct skl_ipc_d0ix_msg *msg); 203 struct skl_ipc_d0ix_msg *msg);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index ea162fbf68e5..81ee251881b4 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -94,19 +94,6 @@ struct adsp_fw_hdr {
94 u32 load_offset; 94 u32 load_offset;
95} __packed; 95} __packed;
96 96
97#define MAX_INSTANCE_BUFF 2
98
99struct uuid_module {
100 uuid_le uuid;
101 int id;
102 int is_loadable;
103 int max_instance;
104 u64 pvt_id[MAX_INSTANCE_BUFF];
105 int *instance_id;
106
107 struct list_head list;
108};
109
110struct skl_ext_manifest_hdr { 97struct skl_ext_manifest_hdr {
111 u32 id; 98 u32 id;
112 u32 len; 99 u32 len;
@@ -115,32 +102,6 @@ struct skl_ext_manifest_hdr {
115 u32 entries; 102 u32 entries;
116}; 103};
117 104
118int snd_skl_get_module_info(struct skl_sst *ctx,
119 struct skl_module_cfg *mconfig)
120{
121 struct uuid_module *module;
122 uuid_le *uuid_mod;
123
124 uuid_mod = (uuid_le *)mconfig->guid;
125
126 if (list_empty(&ctx->uuid_list)) {
127 dev_err(ctx->dev, "Module list is empty\n");
128 return -EINVAL;
129 }
130
131 list_for_each_entry(module, &ctx->uuid_list, list) {
132 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
133 mconfig->id.module_id = module->id;
134 mconfig->is_loadable = module->is_loadable;
135
136 return 0;
137 }
138 }
139
140 return -EINVAL;
141}
142EXPORT_SYMBOL_GPL(snd_skl_get_module_info);
143
144static int skl_get_pvtid_map(struct uuid_module *module, int instance_id) 105static int skl_get_pvtid_map(struct uuid_module *module, int instance_id)
145{ 106{
146 int pvt_id; 107 int pvt_id;
@@ -222,21 +183,18 @@ static inline int skl_pvtid_128(struct uuid_module *module)
222 * This generates a 128 bit private unique id for a module TYPE so that 183 * This generates a 128 bit private unique id for a module TYPE so that
223 * module instance is unique 184 * module instance is unique
224 */ 185 */
225int skl_get_pvt_id(struct skl_sst *ctx, struct skl_module_cfg *mconfig) 186int skl_get_pvt_id(struct skl_sst *ctx, uuid_le *uuid_mod, int instance_id)
226{ 187{
227 struct uuid_module *module; 188 struct uuid_module *module;
228 uuid_le *uuid_mod;
229 int pvt_id; 189 int pvt_id;
230 190
231 uuid_mod = (uuid_le *)mconfig->guid;
232
233 list_for_each_entry(module, &ctx->uuid_list, list) { 191 list_for_each_entry(module, &ctx->uuid_list, list) {
234 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) { 192 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
235 193
236 pvt_id = skl_pvtid_128(module); 194 pvt_id = skl_pvtid_128(module);
237 if (pvt_id >= 0) { 195 if (pvt_id >= 0) {
238 module->instance_id[pvt_id] = 196 module->instance_id[pvt_id] = instance_id;
239 mconfig->id.instance_id; 197
240 return pvt_id; 198 return pvt_id;
241 } 199 }
242 } 200 }
@@ -254,23 +212,21 @@ EXPORT_SYMBOL_GPL(skl_get_pvt_id);
254 * 212 *
255 * This frees a 128 bit private unique id previously generated 213 * This frees a 128 bit private unique id previously generated
256 */ 214 */
257int skl_put_pvt_id(struct skl_sst *ctx, struct skl_module_cfg *mconfig) 215int skl_put_pvt_id(struct skl_sst *ctx, uuid_le *uuid_mod, int *pvt_id)
258{ 216{
259 int i; 217 int i;
260 uuid_le *uuid_mod;
261 struct uuid_module *module; 218 struct uuid_module *module;
262 219
263 uuid_mod = (uuid_le *)mconfig->guid;
264 list_for_each_entry(module, &ctx->uuid_list, list) { 220 list_for_each_entry(module, &ctx->uuid_list, list) {
265 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) { 221 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
266 222
267 if (mconfig->id.pvt_id != 0) 223 if (*pvt_id != 0)
268 i = (mconfig->id.pvt_id) / 64; 224 i = (*pvt_id) / 64;
269 else 225 else
270 i = 0; 226 i = 0;
271 227
272 module->pvt_id[i] &= ~(1 << (mconfig->id.pvt_id)); 228 module->pvt_id[i] &= ~(1 << (*pvt_id));
273 mconfig->id.pvt_id = -1; 229 *pvt_id = -1;
274 return 0; 230 return 0;
275 } 231 }
276 } 232 }
@@ -405,3 +361,83 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw)
405 361
406 return 0; 362 return 0;
407} 363}
364
365int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
366 struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
367 struct sst_dsp_device *skl_dev)
368{
369 struct skl_sst *skl;
370 struct sst_dsp *sst;
371 int ret;
372
373 skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
374 if (skl == NULL)
375 return -ENOMEM;
376
377 skl->dev = dev;
378 skl_dev->thread_context = skl;
379 INIT_LIST_HEAD(&skl->uuid_list);
380 skl->dsp = skl_dsp_ctx_init(dev, skl_dev, irq);
381 if (!skl->dsp) {
382 dev_err(skl->dev, "%s: no device\n", __func__);
383 return -ENODEV;
384 }
385
386 sst = skl->dsp;
387 sst->fw_name = fw_name;
388 sst->dsp_ops = dsp_ops;
389 init_waitqueue_head(&skl->mod_load_wait);
390 INIT_LIST_HEAD(&sst->module_list);
391 ret = skl_ipc_init(dev, skl);
392 if (ret)
393 return ret;
394
395 skl->is_first_boot = true;
396 if (dsp)
397 *dsp = skl;
398
399 return ret;
400}
401
402int skl_prepare_lib_load(struct skl_sst *skl, struct skl_lib_info *linfo,
403 struct firmware *stripped_fw,
404 unsigned int hdr_offset, int index)
405{
406 int ret;
407 struct sst_dsp *dsp = skl->dsp;
408
409 if (linfo->fw == NULL) {
410 ret = request_firmware(&linfo->fw, linfo->name,
411 skl->dev);
412 if (ret < 0) {
413 dev_err(skl->dev, "Request lib %s failed:%d\n",
414 linfo->name, ret);
415 return ret;
416 }
417 }
418
419 if (skl->is_first_boot) {
420 ret = snd_skl_parse_uuids(dsp, linfo->fw, hdr_offset, index);
421 if (ret < 0)
422 return ret;
423 }
424
425 stripped_fw->data = linfo->fw->data;
426 stripped_fw->size = linfo->fw->size;
427 skl_dsp_strip_extended_manifest(stripped_fw);
428
429 return 0;
430}
431
432void skl_release_library(struct skl_lib_info *linfo, int lib_count)
433{
434 int i;
435
436 /* library indices start from 1 to N. 0 represents base FW */
437 for (i = 1; i < lib_count; i++) {
438 if (linfo[i].fw) {
439 release_firmware(linfo[i].fw);
440 linfo[i].fw = NULL;
441 }
442 }
443}
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index b30bd384c8d3..155e456b7a3a 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -52,7 +52,8 @@ static int skl_transfer_firmware(struct sst_dsp *ctx,
52{ 52{
53 int ret = 0; 53 int ret = 0;
54 54
55 ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size); 55 ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
56 true);
56 if (ret < 0) 57 if (ret < 0)
57 return ret; 58 return ret;
58 59
@@ -178,6 +179,18 @@ static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
178 dev_err(ctx->dev, "unable to load firmware\n"); 179 dev_err(ctx->dev, "unable to load firmware\n");
179 return ret; 180 return ret;
180 } 181 }
182
183 /* load libs as they are also lost on D3 */
184 if (skl->lib_count > 1) {
185 ret = ctx->fw_ops.load_library(ctx, skl->lib_info,
186 skl->lib_count);
187 if (ret < 0) {
188 dev_err(ctx->dev, "reload libs failed: %d\n",
189 ret);
190 return ret;
191 }
192
193 }
181 } 194 }
182 195
183 /* 196 /*
@@ -203,7 +216,7 @@ static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
203 216
204 skl->cores.state[core_id] = SKL_DSP_RUNNING; 217 skl->cores.state[core_id] = SKL_DSP_RUNNING;
205 218
206 return ret; 219 return 0;
207} 220}
208 221
209static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id) 222static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
@@ -323,27 +336,85 @@ static struct skl_module_table *skl_module_get_from_id(
323 return NULL; 336 return NULL;
324} 337}
325 338
326static int skl_transfer_module(struct sst_dsp *ctx, 339static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
327 struct skl_load_module_info *module) 340 u32 size, u16 mod_id, u8 table_id, bool is_module)
328{ 341{
329 int ret; 342 int ret, bytes_left, curr_pos;
330 struct skl_sst *skl = ctx->thread_context; 343 struct skl_sst *skl = ctx->thread_context;
344 skl->mod_load_complete = false;
331 345
332 ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, module->fw->data, 346 bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
333 module->fw->size); 347 if (bytes_left < 0)
334 if (ret < 0) 348 return bytes_left;
335 return ret;
336 349
337 ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, 350 /* check is_module flag to load module or library */
338 (void *)&module->mod_id); 351 if (is_module)
339 if (ret < 0) 352 ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
340 dev_err(ctx->dev, "Failed to Load module: %d\n", ret); 353 else
354 ret = skl_sst_ipc_load_library(&skl->ipc, 0, table_id, false);
355
356 if (ret < 0) {
357 dev_err(ctx->dev, "Failed to Load %s with err %d\n",
358 is_module ? "module" : "lib", ret);
359 goto out;
360 }
361
362 /*
363 * if bytes_left > 0 then wait for BDL complete interrupt and
364 * copy the next chunk till bytes_left is 0. if bytes_left is
365 * is zero, then wait for load module IPC reply
366 */
367 while (bytes_left > 0) {
368 curr_pos = size - bytes_left;
369
370 ret = skl_cldma_wait_interruptible(ctx);
371 if (ret < 0)
372 goto out;
373
374 bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
375 data + curr_pos,
376 bytes_left, false);
377 }
341 378
379 ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
380 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
381 if (ret == 0 || !skl->mod_load_status) {
382 dev_err(ctx->dev, "Module Load failed\n");
383 ret = -EIO;
384 }
385
386out:
342 ctx->cl_dev.ops.cl_stop_dma(ctx); 387 ctx->cl_dev.ops.cl_stop_dma(ctx);
343 388
344 return ret; 389 return ret;
345} 390}
346 391
392static int
393kbl_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
394{
395 struct skl_sst *skl = ctx->thread_context;
396 struct firmware stripped_fw;
397 int ret, i;
398
399 /* library indices start from 1 to N. 0 represents base FW */
400 for (i = 1; i < lib_count; i++) {
401 ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
402 SKL_ADSP_FW_BIN_HDR_OFFSET, i);
403 if (ret < 0)
404 goto load_library_failed;
405 ret = skl_transfer_module(ctx, stripped_fw.data,
406 stripped_fw.size, 0, i, false);
407 if (ret < 0)
408 goto load_library_failed;
409 }
410
411 return 0;
412
413load_library_failed:
414 skl_release_library(linfo, lib_count);
415 return ret;
416}
417
347static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid) 418static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
348{ 419{
349 struct skl_module_table *module_entry = NULL; 420 struct skl_module_table *module_entry = NULL;
@@ -365,7 +436,9 @@ static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
365 } 436 }
366 437
367 if (!module_entry->usage_cnt) { 438 if (!module_entry->usage_cnt) {
368 ret = skl_transfer_module(ctx, module_entry->mod_info); 439 ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
440 module_entry->mod_info->fw->size,
441 mod_id, 0, true);
369 if (ret < 0) { 442 if (ret < 0) {
370 dev_err(ctx->dev, "Failed to Load module\n"); 443 dev_err(ctx->dev, "Failed to Load module\n");
371 return ret; 444 return ret;
@@ -388,6 +461,11 @@ static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
388 dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt); 461 dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
389 return -EIO; 462 return -EIO;
390 } 463 }
464
465 /* if module is used by others return, no need to unload */
466 if (usage_cnt > 0)
467 return 0;
468
391 ret = skl_ipc_unload_modules(&skl->ipc, 469 ret = skl_ipc_unload_modules(&skl->ipc,
392 SKL_NUM_MODULES, &mod_id); 470 SKL_NUM_MODULES, &mod_id);
393 if (ret < 0) { 471 if (ret < 0) {
@@ -434,6 +512,16 @@ static struct skl_dsp_fw_ops skl_fw_ops = {
434 .unload_mod = skl_unload_module, 512 .unload_mod = skl_unload_module,
435}; 513};
436 514
515static struct skl_dsp_fw_ops kbl_fw_ops = {
516 .set_state_D0 = skl_set_dsp_D0,
517 .set_state_D3 = skl_set_dsp_D3,
518 .load_fw = skl_load_base_firmware,
519 .get_fw_errcode = skl_get_errorcode,
520 .load_library = kbl_load_library,
521 .load_mod = skl_load_module,
522 .unload_mod = skl_unload_module,
523};
524
437static struct sst_ops skl_ops = { 525static struct sst_ops skl_ops = {
438 .irq_handler = skl_dsp_sst_interrupt, 526 .irq_handler = skl_dsp_sst_interrupt,
439 .write = sst_shim32_write, 527 .write = sst_shim32_write,
@@ -455,45 +543,47 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
455 struct sst_dsp *sst; 543 struct sst_dsp *sst;
456 int ret; 544 int ret;
457 545
458 skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL); 546 ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
459 if (skl == NULL) 547 if (ret < 0) {
460 return -ENOMEM; 548 dev_err(dev, "%s: no device\n", __func__);
461 549 return ret;
462 skl->dev = dev;
463 skl_dev.thread_context = skl;
464 INIT_LIST_HEAD(&skl->uuid_list);
465
466 skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
467 if (!skl->dsp) {
468 dev_err(skl->dev, "%s: no device\n", __func__);
469 return -ENODEV;
470 } 550 }
471 551
552 skl = *dsp;
472 sst = skl->dsp; 553 sst = skl->dsp;
473
474 sst->fw_name = fw_name;
475 sst->addr.lpe = mmio_base; 554 sst->addr.lpe = mmio_base;
476 sst->addr.shim = mmio_base; 555 sst->addr.shim = mmio_base;
477 sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), 556 sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
478 SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); 557 SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
479 558
480 INIT_LIST_HEAD(&sst->module_list);
481 sst->dsp_ops = dsp_ops;
482 sst->fw_ops = skl_fw_ops; 559 sst->fw_ops = skl_fw_ops;
483 560
484 ret = skl_ipc_init(dev, skl); 561 skl->cores.count = 2;
485 if (ret) 562
563 return 0;
564}
565EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
566
567int kbl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
568 const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
569 struct skl_sst **dsp)
570{
571 struct sst_dsp *sst;
572 int ret;
573
574 ret = skl_sst_dsp_init(dev, mmio_base, irq, fw_name, dsp_ops, dsp);
575 if (ret < 0) {
576 dev_err(dev, "%s: Init failed %d\n", __func__, ret);
486 return ret; 577 return ret;
578 }
487 579
488 skl->cores.count = 2; 580 sst = (*dsp)->dsp;
489 skl->is_first_boot = true; 581 sst->fw_ops = kbl_fw_ops;
490 582
491 if (dsp) 583 return 0;
492 *dsp = skl;
493 584
494 return ret;
495} 585}
496EXPORT_SYMBOL_GPL(skl_sst_dsp_init); 586EXPORT_SYMBOL_GPL(kbl_sst_dsp_init);
497 587
498int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx) 588int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
499{ 589{
@@ -507,6 +597,15 @@ int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
507 } 597 }
508 598
509 skl_dsp_init_core_state(sst); 599 skl_dsp_init_core_state(sst);
600
601 if (ctx->lib_count > 1) {
602 ret = sst->fw_ops.load_library(sst, ctx->lib_info,
603 ctx->lib_count);
604 if (ret < 0) {
605 dev_err(dev, "Load Library failed : %x\n", ret);
606 return ret;
607 }
608 }
510 ctx->is_first_boot = false; 609 ctx->is_first_boot = false;
511 610
512 return 0; 611 return 0;
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 2dbfb1b24ef4..3a99712e44a8 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -299,8 +299,6 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
299{ 299{
300 int multiplier = 1; 300 int multiplier = 1;
301 struct skl_module_fmt *in_fmt, *out_fmt; 301 struct skl_module_fmt *in_fmt, *out_fmt;
302 int in_rate, out_rate;
303
304 302
305 /* Since fixups is applied to pin 0 only, ibs, obs needs 303 /* Since fixups is applied to pin 0 only, ibs, obs needs
306 * change for pin 0 only 304 * change for pin 0 only
@@ -311,22 +309,12 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
311 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 309 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
312 multiplier = 5; 310 multiplier = 5;
313 311
314 if (in_fmt->s_freq % 1000) 312 mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
315 in_rate = (in_fmt->s_freq / 1000) + 1; 313 in_fmt->channels * (in_fmt->bit_depth >> 3) *
316 else
317 in_rate = (in_fmt->s_freq / 1000);
318
319 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
320 (mcfg->in_fmt->bit_depth >> 3) *
321 multiplier; 314 multiplier;
322 315
323 if (mcfg->out_fmt->s_freq % 1000) 316 mcfg->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
324 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1; 317 out_fmt->channels * (out_fmt->bit_depth >> 3) *
325 else
326 out_rate = (mcfg->out_fmt->s_freq / 1000);
327
328 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
329 (mcfg->out_fmt->bit_depth >> 3) *
330 multiplier; 318 multiplier;
331} 319}
332 320
@@ -551,6 +539,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
551 int ret = 0; 539 int ret = 0;
552 540
553 list_for_each_entry(w_module, &pipe->w_list, node) { 541 list_for_each_entry(w_module, &pipe->w_list, node) {
542 uuid_le *uuid_mod;
554 w = w_module->w; 543 w = w_module->w;
555 mconfig = w->priv; 544 mconfig = w->priv;
556 545
@@ -588,13 +577,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
588 * FE/BE params 577 * FE/BE params
589 */ 578 */
590 skl_tplg_update_module_params(w, ctx); 579 skl_tplg_update_module_params(w, ctx);
591 mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig); 580 uuid_mod = (uuid_le *)mconfig->guid;
581 mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
582 mconfig->id.instance_id);
592 if (mconfig->id.pvt_id < 0) 583 if (mconfig->id.pvt_id < 0)
593 return ret; 584 return ret;
594 skl_tplg_set_module_init_data(w); 585 skl_tplg_set_module_init_data(w);
595 ret = skl_init_module(ctx, mconfig); 586 ret = skl_init_module(ctx, mconfig);
596 if (ret < 0) { 587 if (ret < 0) {
597 skl_put_pvt_id(ctx, mconfig); 588 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
598 return ret; 589 return ret;
599 } 590 }
600 skl_tplg_alloc_pipe_mcps(skl, mconfig); 591 skl_tplg_alloc_pipe_mcps(skl, mconfig);
@@ -614,7 +605,9 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
614 struct skl_module_cfg *mconfig = NULL; 605 struct skl_module_cfg *mconfig = NULL;
615 606
616 list_for_each_entry(w_module, &pipe->w_list, node) { 607 list_for_each_entry(w_module, &pipe->w_list, node) {
608 uuid_le *uuid_mod;
617 mconfig = w_module->w->priv; 609 mconfig = w_module->w->priv;
610 uuid_mod = (uuid_le *)mconfig->guid;
618 611
619 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod && 612 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
620 mconfig->m_state > SKL_MODULE_UNINIT) { 613 mconfig->m_state > SKL_MODULE_UNINIT) {
@@ -623,7 +616,7 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
623 if (ret < 0) 616 if (ret < 0)
624 return -EIO; 617 return -EIO;
625 } 618 }
626 skl_put_pvt_id(ctx, mconfig); 619 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
627 } 620 }
628 621
629 /* no modules to unload in this path, so return */ 622 /* no modules to unload in this path, so return */
@@ -645,8 +638,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
645 struct skl_module_cfg *mconfig = w->priv; 638 struct skl_module_cfg *mconfig = w->priv;
646 struct skl_pipe_module *w_module; 639 struct skl_pipe_module *w_module;
647 struct skl_pipe *s_pipe = mconfig->pipe; 640 struct skl_pipe *s_pipe = mconfig->pipe;
648 struct skl_module_cfg *src_module = NULL, *dst_module; 641 struct skl_module_cfg *src_module = NULL, *dst_module, *module;
649 struct skl_sst *ctx = skl->skl_sst; 642 struct skl_sst *ctx = skl->skl_sst;
643 struct skl_module_deferred_bind *modules;
650 644
651 /* check resource available */ 645 /* check resource available */
652 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 646 if (!skl_is_pipe_mcps_avail(skl, mconfig))
@@ -687,29 +681,48 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
687 src_module = dst_module; 681 src_module = dst_module;
688 } 682 }
689 683
684 /*
685 * When the destination module is initialized, check for these modules
686 * in deferred bind list. If found, bind them.
687 */
688 list_for_each_entry(w_module, &s_pipe->w_list, node) {
689 if (list_empty(&skl->bind_list))
690 break;
691
692 list_for_each_entry(modules, &skl->bind_list, node) {
693 module = w_module->w->priv;
694 if (modules->dst == module)
695 skl_bind_modules(ctx, modules->src,
696 modules->dst);
697 }
698 }
699
690 return 0; 700 return 0;
691} 701}
692 702
693static int skl_fill_sink_instance_id(struct skl_sst *ctx, 703static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
694 struct skl_algo_data *alg_data) 704 int size, struct skl_module_cfg *mcfg)
695{ 705{
696 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
697 struct skl_mod_inst_map *inst;
698 int i, pvt_id; 706 int i, pvt_id;
699 707
700 inst = params->map; 708 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
709 struct skl_kpb_params *kpb_params =
710 (struct skl_kpb_params *)params;
711 struct skl_mod_inst_map *inst = kpb_params->map;
701 712
702 for (i = 0; i < params->num_modules; i++) { 713 for (i = 0; i < kpb_params->num_modules; i++) {
703 pvt_id = skl_get_pvt_instance_id_map(ctx, 714 pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
704 inst->mod_id, inst->inst_id); 715 inst->inst_id);
705 if (pvt_id < 0) 716 if (pvt_id < 0)
706 return -EINVAL; 717 return -EINVAL;
707 inst->inst_id = pvt_id; 718
708 inst++; 719 inst->inst_id = pvt_id;
720 inst++;
721 }
709 } 722 }
723
710 return 0; 724 return 0;
711} 725}
712
713/* 726/*
714 * Some modules require params to be set after the module is bound to 727 * Some modules require params to be set after the module is bound to
715 * all pins connected. 728 * all pins connected.
@@ -726,6 +739,7 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
726 struct soc_bytes_ext *sb; 739 struct soc_bytes_ext *sb;
727 struct skl_algo_data *bc; 740 struct skl_algo_data *bc;
728 struct skl_specific_cfg *sp_cfg; 741 struct skl_specific_cfg *sp_cfg;
742 u32 *params;
729 743
730 /* 744 /*
731 * check all out/in pins are in bind state. 745 * check all out/in pins are in bind state.
@@ -758,11 +772,18 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
758 bc = (struct skl_algo_data *)sb->dobj.private; 772 bc = (struct skl_algo_data *)sb->dobj.private;
759 773
760 if (bc->set_params == SKL_PARAM_BIND) { 774 if (bc->set_params == SKL_PARAM_BIND) {
761 if (mconfig->m_type == SKL_MODULE_TYPE_KPB) 775 params = kzalloc(bc->max, GFP_KERNEL);
762 skl_fill_sink_instance_id(ctx, bc); 776 if (!params)
763 ret = skl_set_module_params(ctx, 777 return -ENOMEM;
764 (u32 *)bc->params, bc->max, 778
765 bc->param_id, mconfig); 779 memcpy(params, bc->params, bc->max);
780 skl_fill_sink_instance_id(ctx, params, bc->max,
781 mconfig);
782
783 ret = skl_set_module_params(ctx, params,
784 bc->max, bc->param_id, mconfig);
785 kfree(params);
786
766 if (ret < 0) 787 if (ret < 0)
767 return ret; 788 return ret;
768 } 789 }
@@ -772,6 +793,44 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
772 return 0; 793 return 0;
773} 794}
774 795
796
797static int skl_tplg_module_add_deferred_bind(struct skl *skl,
798 struct skl_module_cfg *src, struct skl_module_cfg *dst)
799{
800 struct skl_module_deferred_bind *m_list, *modules;
801 int i;
802
803 /* only supported for module with static pin connection */
804 for (i = 0; i < dst->max_in_queue; i++) {
805 struct skl_module_pin *pin = &dst->m_in_pin[i];
806
807 if (pin->is_dynamic)
808 continue;
809
810 if ((pin->id.module_id == src->id.module_id) &&
811 (pin->id.instance_id == src->id.instance_id)) {
812
813 if (!list_empty(&skl->bind_list)) {
814 list_for_each_entry(modules, &skl->bind_list, node) {
815 if (modules->src == src && modules->dst == dst)
816 return 0;
817 }
818 }
819
820 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
821 if (!m_list)
822 return -ENOMEM;
823
824 m_list->src = src;
825 m_list->dst = dst;
826
827 list_add(&m_list->node, &skl->bind_list);
828 }
829 }
830
831 return 0;
832}
833
775static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, 834static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
776 struct skl *skl, 835 struct skl *skl,
777 struct snd_soc_dapm_widget *src_w, 836 struct snd_soc_dapm_widget *src_w,
@@ -806,6 +865,28 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
806 sink = p->sink; 865 sink = p->sink;
807 sink_mconfig = sink->priv; 866 sink_mconfig = sink->priv;
808 867
868 /*
869 * Modules other than PGA leaf can be connected
870 * directly or via switch to a module in another
871 * pipeline. EX: reference path
872 * when the path is enabled, the dst module that needs
873 * to be bound may not be initialized. if the module is
874 * not initialized, add these modules in the deferred
875 * bind list and when the dst module is initialised,
876 * bind this module to the dst_module in deferred list.
877 */
878 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
879 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
880
881 ret = skl_tplg_module_add_deferred_bind(skl,
882 src_mconfig, sink_mconfig);
883
884 if (ret < 0)
885 return ret;
886
887 }
888
889
809 if (src_mconfig->m_state == SKL_MODULE_UNINIT || 890 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
810 sink_mconfig->m_state == SKL_MODULE_UNINIT) 891 sink_mconfig->m_state == SKL_MODULE_UNINIT)
811 continue; 892 continue;
@@ -985,15 +1066,6 @@ static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
985 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg; 1066 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
986 if (!src_mconfig) 1067 if (!src_mconfig)
987 continue; 1068 continue;
988 /*
989 * If path_found == 1, that means pmd for source
990 * pipe has not occurred, source is connected to
991 * some other sink. so its responsibility of sink
992 * to unbind itself from source.
993 */
994 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
995 if (ret < 0)
996 return ret;
997 1069
998 ret = skl_unbind_modules(ctx, 1070 ret = skl_unbind_modules(ctx,
999 src_mconfig, sink_mconfig); 1071 src_mconfig, sink_mconfig);
@@ -1019,6 +1091,7 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1019 struct skl_module_cfg *src_module = NULL, *dst_module; 1091 struct skl_module_cfg *src_module = NULL, *dst_module;
1020 struct skl_sst *ctx = skl->skl_sst; 1092 struct skl_sst *ctx = skl->skl_sst;
1021 struct skl_pipe *s_pipe = mconfig->pipe; 1093 struct skl_pipe *s_pipe = mconfig->pipe;
1094 struct skl_module_deferred_bind *modules, *tmp;
1022 1095
1023 if (s_pipe->state == SKL_PIPE_INVALID) 1096 if (s_pipe->state == SKL_PIPE_INVALID)
1024 return -EINVAL; 1097 return -EINVAL;
@@ -1027,6 +1100,35 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1027 skl_tplg_free_pipe_mem(skl, mconfig); 1100 skl_tplg_free_pipe_mem(skl, mconfig);
1028 1101
1029 list_for_each_entry(w_module, &s_pipe->w_list, node) { 1102 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1103 if (list_empty(&skl->bind_list))
1104 break;
1105
1106 src_module = w_module->w->priv;
1107
1108 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1109 /*
1110 * When the destination module is deleted, Unbind the
1111 * modules from deferred bind list.
1112 */
1113 if (modules->dst == src_module) {
1114 skl_unbind_modules(ctx, modules->src,
1115 modules->dst);
1116 }
1117
1118 /*
1119 * When the source module is deleted, remove this entry
1120 * from the deferred bind list.
1121 */
1122 if (modules->src == src_module) {
1123 list_del(&modules->node);
1124 modules->src = NULL;
1125 modules->dst = NULL;
1126 kfree(modules);
1127 }
1128 }
1129 }
1130
1131 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1030 dst_module = w_module->w->priv; 1132 dst_module = w_module->w->priv;
1031 1133
1032 if (mconfig->m_state >= SKL_MODULE_INIT_DONE) 1134 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
@@ -1042,6 +1144,11 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1042 1144
1043 skl_delete_pipe(ctx, mconfig->pipe); 1145 skl_delete_pipe(ctx, mconfig->pipe);
1044 1146
1147 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1148 src_module = w_module->w->priv;
1149 src_module->m_state = SKL_MODULE_UNINIT;
1150 }
1151
1045 return skl_tplg_unload_pipe_modules(ctx, s_pipe); 1152 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1046} 1153}
1047 1154
@@ -1083,36 +1190,6 @@ static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1083} 1190}
1084 1191
1085/* 1192/*
1086 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
1087 * mixer is not required then it is treated as static mixer aka vmixer with
1088 * a hard path to source module
1089 * So we don't need to check if source is started or not as hard path puts
1090 * dependency on each other
1091 */
1092static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
1093 struct snd_kcontrol *k, int event)
1094{
1095 struct snd_soc_dapm_context *dapm = w->dapm;
1096 struct skl *skl = get_skl_ctx(dapm->dev);
1097
1098 switch (event) {
1099 case SND_SOC_DAPM_PRE_PMU:
1100 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1101
1102 case SND_SOC_DAPM_POST_PMU:
1103 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1104
1105 case SND_SOC_DAPM_PRE_PMD:
1106 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1107
1108 case SND_SOC_DAPM_POST_PMD:
1109 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1110 }
1111
1112 return 0;
1113}
1114
1115/*
1116 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a 1193 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1117 * second one is required that is created as another pipe entity. 1194 * second one is required that is created as another pipe entity.
1118 * The mixer is responsible for pipe management and represent a pipeline 1195 * The mixer is responsible for pipe management and represent a pipeline
@@ -1252,10 +1329,12 @@ static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1252 case SKL_DEVICE_HDALINK: 1329 case SKL_DEVICE_HDALINK:
1253 pipe->p_params->link_dma_id = params->link_dma_id; 1330 pipe->p_params->link_dma_id = params->link_dma_id;
1254 pipe->p_params->link_index = params->link_index; 1331 pipe->p_params->link_index = params->link_index;
1332 pipe->p_params->link_bps = params->link_bps;
1255 break; 1333 break;
1256 1334
1257 case SKL_DEVICE_HDAHOST: 1335 case SKL_DEVICE_HDAHOST:
1258 pipe->p_params->host_dma_id = params->host_dma_id; 1336 pipe->p_params->host_dma_id = params->host_dma_id;
1337 pipe->p_params->host_bps = params->host_bps;
1259 break; 1338 break;
1260 1339
1261 default: 1340 default:
@@ -1578,7 +1657,7 @@ int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1578 1657
1579static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = { 1658static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1580 {SKL_MIXER_EVENT, skl_tplg_mixer_event}, 1659 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1581 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event}, 1660 {SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1582 {SKL_PGA_EVENT, skl_tplg_pga_event}, 1661 {SKL_PGA_EVENT, skl_tplg_pga_event},
1583}; 1662};
1584 1663
@@ -1632,7 +1711,7 @@ static int skl_tplg_add_pipe(struct device *dev,
1632 list_for_each_entry(ppl, &skl->ppl_list, node) { 1711 list_for_each_entry(ppl, &skl->ppl_list, node) {
1633 if (ppl->pipe->ppl_id == tkn_elem->value) { 1712 if (ppl->pipe->ppl_id == tkn_elem->value) {
1634 mconfig->pipe = ppl->pipe; 1713 mconfig->pipe = ppl->pipe;
1635 return EEXIST; 1714 return -EEXIST;
1636 } 1715 }
1637 } 1716 }
1638 1717
@@ -1924,11 +2003,13 @@ static int skl_tplg_get_token(struct device *dev,
1924 ret = skl_tplg_add_pipe(dev, 2003 ret = skl_tplg_add_pipe(dev,
1925 mconfig, skl, tkn_elem); 2004 mconfig, skl, tkn_elem);
1926 2005
1927 if (ret < 0) 2006 if (ret < 0) {
2007 if (ret == -EEXIST) {
2008 is_pipe_exists = 1;
2009 break;
2010 }
1928 return is_pipe_exists; 2011 return is_pipe_exists;
1929 2012 }
1930 if (ret == EEXIST)
1931 is_pipe_exists = 1;
1932 2013
1933 break; 2014 break;
1934 2015
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index fefab0e99a3b..cc64d6bdb4f6 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -257,6 +257,8 @@ struct skl_pipe_params {
257 snd_pcm_format_t format; 257 snd_pcm_format_t format;
258 int link_index; 258 int link_index;
259 int stream; 259 int stream;
260 unsigned int host_bps;
261 unsigned int link_bps;
260}; 262};
261 263
262struct skl_pipe { 264struct skl_pipe {
@@ -334,17 +336,10 @@ struct skl_pipeline {
334 struct list_head node; 336 struct list_head node;
335}; 337};
336 338
337#define SKL_LIB_NAME_LENGTH 128 339struct skl_module_deferred_bind {
338#define SKL_MAX_LIB 16 340 struct skl_module_cfg *src;
339 341 struct skl_module_cfg *dst;
340struct skl_lib_info { 342 struct list_head node;
341 char name[SKL_LIB_NAME_LENGTH];
342 const struct firmware *fw;
343};
344
345struct skl_manifest {
346 u32 lib_count;
347 struct skl_lib_info lib[SKL_MAX_LIB];
348}; 343};
349 344
350static inline struct skl *get_skl_ctx(struct device *dev) 345static inline struct skl *get_skl_ctx(struct device *dev)
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 0c57d4eaae3a..6df3b317a476 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -512,7 +512,7 @@ static int probe_codec(struct hdac_ext_bus *ebus, int addr)
512 struct hdac_bus *bus = ebus_to_hbus(ebus); 512 struct hdac_bus *bus = ebus_to_hbus(ebus);
513 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) | 513 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
514 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; 514 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
515 unsigned int res; 515 unsigned int res = -1;
516 516
517 mutex_lock(&bus->cmd_mutex); 517 mutex_lock(&bus->cmd_mutex);
518 snd_hdac_bus_send_cmd(bus, cmd); 518 snd_hdac_bus_send_cmd(bus, cmd);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 8e2878012d53..a454f6035f3e 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -56,6 +56,7 @@ struct skl {
56 56
57 struct skl_dsp_resource resource; 57 struct skl_dsp_resource resource;
58 struct list_head ppl_list; 58 struct list_head ppl_list;
59 struct list_head bind_list;
59 60
60 const char *fw_name; 61 const char *fw_name;
61 char tplg_name[64]; 62 char tplg_name[64];
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index d7013bde6f45..5c68797f36c4 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -22,6 +22,16 @@ config SND_SOC_MT2701_CS42448
22 Select Y if you have such device. 22 Select Y if you have such device.
23 If unsure select "N". 23 If unsure select "N".
24 24
25config SND_SOC_MT2701_WM8960
26 tristate "ASoc Audio driver for MT2701 with WM8960 codec"
27 depends on SND_SOC_MT2701 && I2C
28 select SND_SOC_WM8960
29 help
30 This adds ASoC driver for Mediatek MT2701 boards
31 with the WM8960 codecs.
32 Select Y if you have such device.
33 If unsure select "N".
34
25config SND_SOC_MT8173 35config SND_SOC_MT8173
26 tristate "ASoC support for Mediatek MT8173 chip" 36 tristate "ASoC support for Mediatek MT8173 chip"
27 depends on ARCH_MEDIATEK 37 depends on ARCH_MEDIATEK
diff --git a/sound/soc/mediatek/mt2701/Makefile b/sound/soc/mediatek/mt2701/Makefile
index 31c3d04d4942..c91deb6aca21 100644
--- a/sound/soc/mediatek/mt2701/Makefile
+++ b/sound/soc/mediatek/mt2701/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_SND_SOC_MT2701) += snd-soc-mt2701-afe.o
17 17
18# machine driver 18# machine driver
19obj-$(CONFIG_SND_SOC_MT2701_CS42448) += mt2701-cs42448.o 19obj-$(CONFIG_SND_SOC_MT2701_CS42448) += mt2701-cs42448.o
20obj-$(CONFIG_SND_SOC_MT2701_WM8960) += mt2701-wm8960.o
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index c7fa3e663463..bc5d4db94de6 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -604,6 +604,22 @@ static struct snd_soc_dai_ops mt2701_btmrg_ops = {
604static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = { 604static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = {
605 /* FE DAIs: memory intefaces to CPU */ 605 /* FE DAIs: memory intefaces to CPU */
606 { 606 {
607 .name = "PCMO0",
608 .id = MT2701_MEMIF_DL1,
609 .suspend = mtk_afe_dai_suspend,
610 .resume = mtk_afe_dai_resume,
611 .playback = {
612 .stream_name = "DL1",
613 .channels_min = 1,
614 .channels_max = 2,
615 .rates = SNDRV_PCM_RATE_8000_192000,
616 .formats = (SNDRV_PCM_FMTBIT_S16_LE
617 | SNDRV_PCM_FMTBIT_S24_LE
618 | SNDRV_PCM_FMTBIT_S32_LE)
619 },
620 .ops = &mt2701_single_memif_dai_ops,
621 },
622 {
607 .name = "PCM_multi", 623 .name = "PCM_multi",
608 .id = MT2701_MEMIF_DLM, 624 .id = MT2701_MEMIF_DLM,
609 .suspend = mtk_afe_dai_suspend, 625 .suspend = mtk_afe_dai_suspend,
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
index 1e7e8d43fd8a..aa5b31b121e3 100644
--- a/sound/soc/mediatek/mt2701/mt2701-cs42448.c
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -129,7 +129,7 @@ static int mt2701_cs42448_fe_ops_startup(struct snd_pcm_substream *substream)
129 return 0; 129 return 0;
130} 130}
131 131
132static struct snd_soc_ops mt2701_cs42448_48k_fe_ops = { 132static const struct snd_soc_ops mt2701_cs42448_48k_fe_ops = {
133 .startup = mt2701_cs42448_fe_ops_startup, 133 .startup = mt2701_cs42448_fe_ops_startup,
134}; 134};
135 135
diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
new file mode 100644
index 000000000000..a08ce2323bdc
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
@@ -0,0 +1,176 @@
1/*
2 * mt2701-wm8960.c -- MT2701 WM8960 ALSA SoC machine driver
3 *
4 * Copyright (c) 2017 MediaTek Inc.
5 * Author: Ryder Lee <ryder.lee@mediatek.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/module.h>
18#include <sound/soc.h>
19
20#include "mt2701-afe-common.h"
21
22static const struct snd_soc_dapm_widget mt2701_wm8960_widgets[] = {
23 SND_SOC_DAPM_HP("Headphone", NULL),
24 SND_SOC_DAPM_MIC("AMIC", NULL),
25};
26
27static const struct snd_kcontrol_new mt2701_wm8960_controls[] = {
28 SOC_DAPM_PIN_SWITCH("Headphone"),
29 SOC_DAPM_PIN_SWITCH("AMIC"),
30};
31
32static int mt2701_wm8960_be_ops_hw_params(struct snd_pcm_substream *substream,
33 struct snd_pcm_hw_params *params)
34{
35 struct snd_soc_pcm_runtime *rtd = substream->private_data;
36 struct snd_soc_dai *codec_dai = rtd->codec_dai;
37 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
38 unsigned int mclk_rate;
39 unsigned int rate = params_rate(params);
40 unsigned int div_mclk_over_bck = rate > 192000 ? 2 : 4;
41 unsigned int div_bck_over_lrck = 64;
42
43 mclk_rate = rate * div_bck_over_lrck * div_mclk_over_bck;
44
45 snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate, SND_SOC_CLOCK_OUT);
46 snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate, SND_SOC_CLOCK_IN);
47
48 return 0;
49}
50
51static struct snd_soc_ops mt2701_wm8960_be_ops = {
52 .hw_params = mt2701_wm8960_be_ops_hw_params
53};
54
55static struct snd_soc_dai_link mt2701_wm8960_dai_links[] = {
56 /* FE */
57 {
58 .name = "wm8960-playback",
59 .stream_name = "wm8960-playback",
60 .cpu_dai_name = "PCMO0",
61 .codec_name = "snd-soc-dummy",
62 .codec_dai_name = "snd-soc-dummy-dai",
63 .trigger = {SND_SOC_DPCM_TRIGGER_POST,
64 SND_SOC_DPCM_TRIGGER_POST},
65 .dynamic = 1,
66 .dpcm_playback = 1,
67 },
68 {
69 .name = "wm8960-capture",
70 .stream_name = "wm8960-capture",
71 .cpu_dai_name = "PCM0",
72 .codec_name = "snd-soc-dummy",
73 .codec_dai_name = "snd-soc-dummy-dai",
74 .trigger = {SND_SOC_DPCM_TRIGGER_POST,
75 SND_SOC_DPCM_TRIGGER_POST},
76 .dynamic = 1,
77 .dpcm_capture = 1,
78 },
79 /* BE */
80 {
81 .name = "wm8960-codec",
82 .cpu_dai_name = "I2S0",
83 .no_pcm = 1,
84 .codec_dai_name = "wm8960-hifi",
85 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
86 | SND_SOC_DAIFMT_GATED,
87 .ops = &mt2701_wm8960_be_ops,
88 .dpcm_playback = 1,
89 .dpcm_capture = 1,
90 },
91};
92
93static struct snd_soc_card mt2701_wm8960_card = {
94 .name = "mt2701-wm8960",
95 .owner = THIS_MODULE,
96 .dai_link = mt2701_wm8960_dai_links,
97 .num_links = ARRAY_SIZE(mt2701_wm8960_dai_links),
98 .controls = mt2701_wm8960_controls,
99 .num_controls = ARRAY_SIZE(mt2701_wm8960_controls),
100 .dapm_widgets = mt2701_wm8960_widgets,
101 .num_dapm_widgets = ARRAY_SIZE(mt2701_wm8960_widgets),
102};
103
104static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
105{
106 struct snd_soc_card *card = &mt2701_wm8960_card;
107 struct device_node *platform_node, *codec_node;
108 int ret, i;
109
110 platform_node = of_parse_phandle(pdev->dev.of_node,
111 "mediatek,platform", 0);
112 if (!platform_node) {
113 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
114 return -EINVAL;
115 }
116 for (i = 0; i < card->num_links; i++) {
117 if (mt2701_wm8960_dai_links[i].platform_name)
118 continue;
119 mt2701_wm8960_dai_links[i].platform_of_node = platform_node;
120 }
121
122 card->dev = &pdev->dev;
123
124 codec_node = of_parse_phandle(pdev->dev.of_node,
125 "mediatek,audio-codec", 0);
126 if (!codec_node) {
127 dev_err(&pdev->dev,
128 "Property 'audio-codec' missing or invalid\n");
129 return -EINVAL;
130 }
131 for (i = 0; i < card->num_links; i++) {
132 if (mt2701_wm8960_dai_links[i].codec_name)
133 continue;
134 mt2701_wm8960_dai_links[i].codec_of_node = codec_node;
135 }
136
137 ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
138 if (ret) {
139 dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
140 return ret;
141 }
142
143 ret = devm_snd_soc_register_card(&pdev->dev, card);
144 if (ret)
145 dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
146 __func__, ret);
147
148 return ret;
149}
150
151#ifdef CONFIG_OF
152static const struct of_device_id mt2701_wm8960_machine_dt_match[] = {
153 {.compatible = "mediatek,mt2701-wm8960-machine",},
154 {}
155};
156#endif
157
158static struct platform_driver mt2701_wm8960_machine = {
159 .driver = {
160 .name = "mt2701-wm8960",
161 .owner = THIS_MODULE,
162#ifdef CONFIG_OF
163 .of_match_table = mt2701_wm8960_machine_dt_match,
164#endif
165 },
166 .probe = mt2701_wm8960_machine_probe,
167};
168
169module_platform_driver(mt2701_wm8960_machine);
170
171/* Module information */
172MODULE_DESCRIPTION("MT2701 WM8960 ALSA SoC machine driver");
173MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
174MODULE_LICENSE("GPL v2");
175MODULE_ALIAS("mt2701 wm8960 soc card");
176
diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
index 46c8e6ae00b4..e0c2b23ec711 100644
--- a/sound/soc/mediatek/mt8173/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
@@ -67,7 +67,7 @@ static int mt8173_max98090_hw_params(struct snd_pcm_substream *substream,
67 SND_SOC_CLOCK_IN); 67 SND_SOC_CLOCK_IN);
68} 68}
69 69
70static struct snd_soc_ops mt8173_max98090_ops = { 70static const struct snd_soc_ops mt8173_max98090_ops = {
71 .hw_params = mt8173_max98090_hw_params, 71 .hw_params = mt8173_max98090_hw_params,
72}; 72};
73 73
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 467f7049a288..5e383eb456a4 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -75,7 +75,7 @@ static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
75 return 0; 75 return 0;
76} 76}
77 77
78static struct snd_soc_ops mt8173_rt5650_rt5514_ops = { 78static const struct snd_soc_ops mt8173_rt5650_rt5514_ops = {
79 .hw_params = mt8173_rt5650_rt5514_hw_params, 79 .hw_params = mt8173_rt5650_rt5514_hw_params,
80}; 80};
81 81
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index 1b8b2a778845..fed1f15a39c2 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -79,7 +79,7 @@ static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
79 return 0; 79 return 0;
80} 80}
81 81
82static struct snd_soc_ops mt8173_rt5650_rt5676_ops = { 82static const struct snd_soc_ops mt8173_rt5650_rt5676_ops = {
83 .hw_params = mt8173_rt5650_rt5676_hw_params, 83 .hw_params = mt8173_rt5650_rt5676_hw_params,
84}; 84};
85 85
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index ba65f4157a7e..a78470839b65 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -105,7 +105,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
105 return 0; 105 return 0;
106} 106}
107 107
108static struct snd_soc_ops mt8173_rt5650_ops = { 108static const struct snd_soc_ops mt8173_rt5650_ops = {
109 .hw_params = mt8173_rt5650_hw_params, 109 .hw_params = mt8173_rt5650_hw_params,
110}; 110};
111 111
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 25a33e9d417a..d5651026ec10 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -49,7 +49,7 @@ static int am3517evm_hw_params(struct snd_pcm_substream *substream,
49 return ret; 49 return ret;
50} 50}
51 51
52static struct snd_soc_ops am3517evm_ops = { 52static const struct snd_soc_ops am3517evm_ops = {
53 .hw_params = am3517evm_hw_params, 53 .hw_params = am3517evm_hw_params,
54}; 54};
55 55
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index fdecb7043174..71e5f31fa306 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -124,7 +124,7 @@ static int n810_hw_params(struct snd_pcm_substream *substream,
124 return err; 124 return err;
125} 125}
126 126
127static struct snd_soc_ops n810_ops = { 127static const struct snd_soc_ops n810_ops = {
128 .startup = n810_startup, 128 .startup = n810_startup,
129 .hw_params = n810_hw_params, 129 .hw_params = n810_hw_params,
130 .shutdown = n810_shutdown, 130 .shutdown = n810_shutdown,
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 89fe95e877db..614b18d2f631 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -70,7 +70,7 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
70 return ret; 70 return ret;
71} 71}
72 72
73static struct snd_soc_ops omap_abe_ops = { 73static const struct snd_soc_ops omap_abe_ops = {
74 .hw_params = omap_abe_hw_params, 74 .hw_params = omap_abe_hw_params,
75}; 75};
76 76
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index 743131473056..a24b0dedabb9 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -73,7 +73,7 @@ static int omap_twl4030_hw_params(struct snd_pcm_substream *substream,
73 return snd_soc_runtime_set_dai_fmt(rtd, fmt); 73 return snd_soc_runtime_set_dai_fmt(rtd, fmt);
74} 74}
75 75
76static struct snd_soc_ops omap_twl4030_ops = { 76static const struct snd_soc_ops omap_twl4030_ops = {
77 .hw_params = omap_twl4030_hw_params, 77 .hw_params = omap_twl4030_hw_params,
78}; 78};
79 79
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 732e749a1f8e..4e3de712159c 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -184,7 +184,7 @@ static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
184 return 0; 184 return 0;
185} 185}
186 186
187static struct snd_soc_ops omap3pandora_ops = { 187static const struct snd_soc_ops omap3pandora_ops = {
188 .hw_params = omap3pandora_hw_params, 188 .hw_params = omap3pandora_hw_params,
189}; 189};
190 190
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c
index aa4053bf6710..e4096779ca05 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/omap/osk5912.c
@@ -68,7 +68,7 @@ static int osk_hw_params(struct snd_pcm_substream *substream,
68 return err; 68 return err;
69} 69}
70 70
71static struct snd_soc_ops osk_ops = { 71static const struct snd_soc_ops osk_ops = {
72 .startup = osk_startup, 72 .startup = osk_startup,
73 .hw_params = osk_hw_params, 73 .hw_params = osk_hw_params,
74 .shutdown = osk_shutdown, 74 .shutdown = osk_shutdown,
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index a76845748a10..3aeb65feaea1 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -123,7 +123,7 @@ static int rx51_hw_params(struct snd_pcm_substream *substream,
123 SND_SOC_CLOCK_IN); 123 SND_SOC_CLOCK_IN);
124} 124}
125 125
126static struct snd_soc_ops rx51_ops = { 126static const struct snd_soc_ops rx51_ops = {
127 .startup = rx51_startup, 127 .startup = rx51_startup,
128 .hw_params = rx51_hw_params, 128 .hw_params = rx51_hw_params,
129}; 129};
@@ -433,10 +433,9 @@ static int rx51_soc_probe(struct platform_device *pdev)
433 } 433 }
434 434
435 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 435 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
436 if (pdata == NULL) { 436 if (pdata == NULL)
437 dev_err(card->dev, "failed to create private data\n");
438 return -ENOMEM; 437 return -ENOMEM;
439 } 438
440 snd_soc_card_set_drvdata(card, pdata); 439 snd_soc_card_set_drvdata(card, pdata);
441 440
442 pdata->tvout_selection_gpio = devm_gpiod_get(card->dev, 441 pdata->tvout_selection_gpio = devm_gpiod_get(card->dev,
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index b6cb9950f05d..9a3f5b799720 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -74,7 +74,7 @@ static int brownstone_wm8994_hw_params(struct snd_pcm_substream *substream,
74} 74}
75 75
76/* machine stream operations */ 76/* machine stream operations */
77static struct snd_soc_ops brownstone_ops = { 77static const struct snd_soc_ops brownstone_ops = {
78 .hw_params = brownstone_wm8994_hw_params, 78 .hw_params = brownstone_wm8994_hw_params,
79}; 79};
80 80
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index 311774e9ca46..054e0d65db9d 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -154,7 +154,7 @@ static int corgi_hw_params(struct snd_pcm_substream *substream,
154 return 0; 154 return 0;
155} 155}
156 156
157static struct snd_soc_ops corgi_ops = { 157static const struct snd_soc_ops corgi_ops = {
158 .startup = corgi_startup, 158 .startup = corgi_startup,
159 .hw_params = corgi_hw_params, 159 .hw_params = corgi_hw_params,
160 .shutdown = corgi_shutdown, 160 .shutdown = corgi_shutdown,
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index fdcd94adee7c..82bcbbb1841b 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -81,7 +81,7 @@ static struct snd_soc_dai_link e750_dai[] = {
81 .name = "AC97 Aux", 81 .name = "AC97 Aux",
82 .stream_name = "AC97 Aux", 82 .stream_name = "AC97 Aux",
83 .cpu_dai_name = "pxa2xx-ac97-aux", 83 .cpu_dai_name = "pxa2xx-ac97-aux",
84 .codec_dai_name ="wm9705-aux", 84 .codec_dai_name = "wm9705-aux",
85 .platform_name = "pxa-pcm-audio", 85 .platform_name = "pxa-pcm-audio",
86 .codec_name = "wm9705-codec", 86 .codec_name = "wm9705-codec",
87 }, 87 },
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 2df714f70ec0..1ed8aa2348f1 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -81,7 +81,7 @@ static struct snd_soc_dai_link e800_dai[] = {
81 .name = "AC97 Aux", 81 .name = "AC97 Aux",
82 .stream_name = "AC97 Aux", 82 .stream_name = "AC97 Aux",
83 .cpu_dai_name = "pxa2xx-ac97-aux", 83 .cpu_dai_name = "pxa2xx-ac97-aux",
84 .codec_dai_name ="wm9712-aux", 84 .codec_dai_name = "wm9712-aux",
85 .platform_name = "pxa-pcm-audio", 85 .platform_name = "pxa-pcm-audio",
86 .codec_name = "wm9712-codec", 86 .codec_name = "wm9712-codec",
87 }, 87 },
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index 6f2020f6c8d3..e046770ce70e 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -43,7 +43,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
43 .name = "AC97 Aux", 43 .name = "AC97 Aux",
44 .stream_name = "AC97 Aux", 44 .stream_name = "AC97 Aux",
45 .cpu_dai_name = "pxa2xx-ac97-aux", 45 .cpu_dai_name = "pxa2xx-ac97-aux",
46 .codec_dai_name ="wm9712-aux", 46 .codec_dai_name = "wm9712-aux",
47 .platform_name = "pxa-pcm-audio", 47 .platform_name = "pxa-pcm-audio",
48 .codec_name = "wm9712-codec", 48 .codec_name = "wm9712-codec",
49 }, 49 },
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 85483049b916..a9ac881c2e14 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -79,7 +79,7 @@ static int hx4700_hw_params(struct snd_pcm_substream *substream,
79 return 0; 79 return 0;
80} 80}
81 81
82static struct snd_soc_ops hx4700_ops = { 82static const struct snd_soc_ops hx4700_ops = {
83 .hw_params = hx4700_hw_params, 83 .hw_params = hx4700_hw_params,
84}; 84};
85 85
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
index 9d0e40771ef5..78475376f971 100644
--- a/sound/soc/pxa/imote2.c
+++ b/sound/soc/pxa/imote2.c
@@ -42,7 +42,7 @@ static int imote2_asoc_hw_params(struct snd_pcm_substream *substream,
42 return ret; 42 return ret;
43} 43}
44 44
45static struct snd_soc_ops imote2_asoc_ops = { 45static const struct snd_soc_ops imote2_asoc_ops = {
46 .hw_params = imote2_asoc_hw_params, 46 .hw_params = imote2_asoc_hw_params,
47}; 47};
48 48
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 2d4d4455fe87..2fc012b06c43 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -255,12 +255,12 @@ static int magician_capture_hw_params(struct snd_pcm_substream *substream,
255 return 0; 255 return 0;
256} 256}
257 257
258static struct snd_soc_ops magician_capture_ops = { 258static const struct snd_soc_ops magician_capture_ops = {
259 .startup = magician_startup, 259 .startup = magician_startup,
260 .hw_params = magician_capture_hw_params, 260 .hw_params = magician_capture_hw_params,
261}; 261};
262 262
263static struct snd_soc_ops magician_playback_ops = { 263static const struct snd_soc_ops magician_playback_ops = {
264 .startup = magician_startup, 264 .startup = magician_startup,
265 .hw_params = magician_playback_hw_params, 265 .hw_params = magician_playback_hw_params,
266}; 266};
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 8760a6687885..c4c6fbedc723 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -157,7 +157,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
157 .name = "AC97 Aux", 157 .name = "AC97 Aux",
158 .stream_name = "AC97 Aux", 158 .stream_name = "AC97 Aux",
159 .cpu_dai_name = "pxa2xx-ac97-aux", 159 .cpu_dai_name = "pxa2xx-ac97-aux",
160 .codec_dai_name ="wm9713-aux", 160 .codec_dai_name = "wm9713-aux",
161 .codec_name = "wm9713-codec", 161 .codec_name = "wm9713-codec",
162 .platform_name = "pxa-pcm-audio", 162 .platform_name = "pxa-pcm-audio",
163 .ops = &mioa701_ops, 163 .ops = &mioa701_ops,
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 96df9b2d8fc4..5b5f1a442891 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -166,7 +166,6 @@ static void mmp_pcm_free_dma_buffers(struct snd_pcm *pcm)
166 buf->area = NULL; 166 buf->area = NULL;
167 } 167 }
168 168
169 return;
170} 169}
171 170
172static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream, 171static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream,
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index ca8b23f8c525..9cc35012e6e5 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -119,7 +119,6 @@ static void mmp_sspa_shutdown(struct snd_pcm_substream *substream,
119 clk_disable(priv->sspa->clk); 119 clk_disable(priv->sspa->clk);
120 clk_disable(priv->sysclk); 120 clk_disable(priv->sysclk);
121 121
122 return;
123} 122}
124 123
125/* 124/*
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index a879aba0691f..b6693f32fc02 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -129,7 +129,7 @@ static int poodle_hw_params(struct snd_pcm_substream *substream,
129 return 0; 129 return 0;
130} 130}
131 131
132static struct snd_soc_ops poodle_ops = { 132static const struct snd_soc_ops poodle_ops = {
133 .startup = poodle_startup, 133 .startup = poodle_startup,
134 .hw_params = poodle_hw_params, 134 .hw_params = poodle_hw_params,
135 .shutdown = poodle_shutdown, 135 .shutdown = poodle_shutdown,
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 3cad990dad2c..0291c7cb64eb 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -354,6 +354,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
354 if (ssp->type == PXA3xx_SSP) { 354 if (ssp->type == PXA3xx_SSP) {
355 u32 val; 355 u32 val;
356 u64 tmp = 19968; 356 u64 tmp = 19968;
357
357 tmp *= 1000000; 358 tmp *= 1000000;
358 do_div(tmp, freq_out); 359 do_div(tmp, freq_out);
359 val = tmp; 360 val = tmp;
@@ -590,13 +591,13 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
590 591
591 if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) { 592 if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) {
592 /* This is a special case where the bitclk is 64fs 593 /* This is a special case where the bitclk is 64fs
593 * and we're not dealing with 2*32 bits of audio 594 * and we're not dealing with 2*32 bits of audio
594 * samples. 595 * samples.
595 * 596 *
596 * The SSP values used for that are all found out by 597 * The SSP values used for that are all found out by
597 * trying and failing a lot; some of the registers 598 * trying and failing a lot; some of the registers
598 * needed for that mode are only available on PXA3xx. 599 * needed for that mode are only available on PXA3xx.
599 */ 600 */
600 if (ssp->type != PXA3xx_SSP) 601 if (ssp->type != PXA3xx_SSP)
601 return -EINVAL; 602 return -EINVAL;
602 603
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 2e2fb1838ec2..f49bf02e5ec2 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -140,9 +140,8 @@ static int pxa2xx_ac97_mic_startup(struct snd_pcm_substream *substream,
140{ 140{
141 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 141 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
142 return -ENODEV; 142 return -ENODEV;
143 else 143 snd_soc_dai_set_dma_data(cpu_dai, substream,
144 snd_soc_dai_set_dma_data(cpu_dai, substream, 144 &pxa2xx_ac97_pcm_mic_mono_in);
145 &pxa2xx_ac97_pcm_mic_mono_in);
146 145
147 return 0; 146 return 0;
148} 147}
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 0389cf7b4b1e..3fb60baf6eab 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -46,10 +46,10 @@
46#define SACR0_STRF (1 << 5) /* FIFO Select for EFWR Special Function */ 46#define SACR0_STRF (1 << 5) /* FIFO Select for EFWR Special Function */
47#define SACR0_EFWR (1 << 4) /* Enable EFWR Function */ 47#define SACR0_EFWR (1 << 4) /* Enable EFWR Function */
48#define SACR0_RST (1 << 3) /* FIFO, i2s Register Reset */ 48#define SACR0_RST (1 << 3) /* FIFO, i2s Register Reset */
49#define SACR0_BCKD (1 << 2) /* Bit Clock Direction */ 49#define SACR0_BCKD (1 << 2) /* Bit Clock Direction */
50#define SACR0_ENB (1 << 0) /* Enable I2S Link */ 50#define SACR0_ENB (1 << 0) /* Enable I2S Link */
51#define SACR1_ENLBF (1 << 5) /* Enable Loopback */ 51#define SACR1_ENLBF (1 << 5) /* Enable Loopback */
52#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */ 52#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */
53#define SACR1_DREC (1 << 3) /* Disable Recording Function */ 53#define SACR1_DREC (1 << 3) /* Disable Recording Function */
54#define SACR1_AMSL (1 << 0) /* Specify Alternate Mode */ 54#define SACR1_AMSL (1 << 0) /* Specify Alternate Mode */
55 55
@@ -60,7 +60,7 @@
60#define SASR0_TFS (1 << 3) /* Tx FIFO Service Request */ 60#define SASR0_TFS (1 << 3) /* Tx FIFO Service Request */
61#define SASR0_BSY (1 << 2) /* I2S Busy */ 61#define SASR0_BSY (1 << 2) /* I2S Busy */
62#define SASR0_RNE (1 << 1) /* Rx FIFO Not Empty */ 62#define SASR0_RNE (1 << 1) /* Rx FIFO Not Empty */
63#define SASR0_TNF (1 << 0) /* Tx FIFO Not Empty */ 63#define SASR0_TNF (1 << 0) /* Tx FIFO Not Empty */
64 64
65#define SAICR_ROR (1 << 6) /* Clear Rx FIFO Overrun Interrupt */ 65#define SAICR_ROR (1 << 6) /* Clear Rx FIFO Overrun Interrupt */
66#define SAICR_TUR (1 << 5) /* Clear Tx FIFO Underrun Interrupt */ 66#define SAICR_TUR (1 << 5) /* Clear Tx FIFO Underrun Interrupt */
@@ -119,7 +119,7 @@ static int pxa_i2s_wait(void)
119 int i; 119 int i;
120 120
121 /* flush the Rx FIFO */ 121 /* flush the Rx FIFO */
122 for(i = 0; i < 16; i++) 122 for (i = 0; i < 16; i++)
123 SADR; 123 SADR;
124 return 0; 124 return 0;
125} 125}
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 410d48b93031..b51d7a0755d5 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -85,7 +85,7 @@ static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
85} 85}
86 86
87static struct snd_soc_platform_driver pxa2xx_soc_platform = { 87static struct snd_soc_platform_driver pxa2xx_soc_platform = {
88 .ops = &pxa2xx_pcm_ops, 88 .ops = &pxa2xx_pcm_ops,
89 .pcm_new = pxa2xx_soc_pcm_new, 89 .pcm_new = pxa2xx_soc_pcm_new,
90 .pcm_free = pxa2xx_pcm_free_dma_buffers, 90 .pcm_free = pxa2xx_pcm_free_dma_buffers,
91}; 91};
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 552b763005ed..111a907c4eb9 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -132,7 +132,7 @@ static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream,
132 return 0; 132 return 0;
133} 133}
134 134
135static struct snd_soc_ops raumfeld_cs4270_ops = { 135static const struct snd_soc_ops raumfeld_cs4270_ops = {
136 .startup = raumfeld_cs4270_startup, 136 .startup = raumfeld_cs4270_startup,
137 .shutdown = raumfeld_cs4270_shutdown, 137 .shutdown = raumfeld_cs4270_shutdown,
138 .hw_params = raumfeld_cs4270_hw_params, 138 .hw_params = raumfeld_cs4270_hw_params,
@@ -228,14 +228,12 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
228 .codec_name = "spi0.0", \ 228 .codec_name = "spi0.0", \
229} 229}
230 230
231static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = 231static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = {
232{
233 DAI_LINK_CS4270, 232 DAI_LINK_CS4270,
234 DAI_LINK_AK4104, 233 DAI_LINK_AK4104,
235}; 234};
236 235
237static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = 236static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = {
238{
239 DAI_LINK_CS4270, 237 DAI_LINK_CS4270,
240}; 238};
241 239
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 07d77cddac60..1671da648e95 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -156,7 +156,7 @@ static int spitz_hw_params(struct snd_pcm_substream *substream,
156 return 0; 156 return 0;
157} 157}
158 158
159static struct snd_soc_ops spitz_ops = { 159static const struct snd_soc_ops spitz_ops = {
160 .startup = spitz_startup, 160 .startup = spitz_startup,
161 .hw_params = spitz_hw_params, 161 .hw_params = spitz_hw_params,
162}; 162};
@@ -230,8 +230,8 @@ static const struct snd_soc_dapm_route spitz_audio_map[] = {
230 {"Headset Jack", NULL, "ROUT1"}, 230 {"Headset Jack", NULL, "ROUT1"},
231 231
232 /* ext speaker connected to LOUT2, ROUT2 */ 232 /* ext speaker connected to LOUT2, ROUT2 */
233 {"Ext Spk", NULL , "ROUT2"}, 233 {"Ext Spk", NULL, "ROUT2"},
234 {"Ext Spk", NULL , "LOUT2"}, 234 {"Ext Spk", NULL, "LOUT2"},
235 235
236 /* mic is connected to input 1 - with bias */ 236 /* mic is connected to input 1 - with bias */
237 {"LINPUT1", NULL, "Mic Bias"}, 237 {"LINPUT1", NULL, "Mic Bias"},
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index e022b2a777f6..ae9c12e1ea2a 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -85,7 +85,7 @@ static int tosa_startup(struct snd_pcm_substream *substream)
85 return 0; 85 return 0;
86} 86}
87 87
88static struct snd_soc_ops tosa_ops = { 88static const struct snd_soc_ops tosa_ops = {
89 .startup = tosa_startup, 89 .startup = tosa_startup,
90}; 90};
91 91
@@ -133,7 +133,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
133static int tosa_hp_event(struct snd_soc_dapm_widget *w, 133static int tosa_hp_event(struct snd_soc_dapm_widget *w,
134 struct snd_kcontrol *k, int event) 134 struct snd_kcontrol *k, int event)
135{ 135{
136 gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 :0); 136 gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
137 return 0; 137 return 0;
138} 138}
139 139
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 990b1aa6d7f6..5b0eccd2b4dd 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -119,8 +119,8 @@ static const struct snd_soc_dapm_route z2_audio_map[] = {
119 {"Headphone Jack", NULL, "ROUT1"}, 119 {"Headphone Jack", NULL, "ROUT1"},
120 120
121 /* ext speaker connected to LOUT2, ROUT2 */ 121 /* ext speaker connected to LOUT2, ROUT2 */
122 {"Ext Spk", NULL , "ROUT2"}, 122 {"Ext Spk", NULL, "ROUT2"},
123 {"Ext Spk", NULL , "LOUT2"}, 123 {"Ext Spk", NULL, "LOUT2"},
124 124
125 /* mic is connected to R input 2 - with bias */ 125 /* mic is connected to R input 2 - with bias */
126 {"RINPUT2", NULL, "Mic Bias"}, 126 {"RINPUT2", NULL, "Mic Bias"},
@@ -152,7 +152,7 @@ err:
152 return ret; 152 return ret;
153} 153}
154 154
155static struct snd_soc_ops z2_ops = { 155static const struct snd_soc_ops z2_ops = {
156 .hw_params = z2_hw_params, 156 .hw_params = z2_hw_params,
157}; 157};
158 158
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index 6fbcdf02c88d..ba468e560dd2 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -132,7 +132,7 @@ static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
132 return 0; 132 return 0;
133} 133}
134 134
135static struct snd_soc_ops zylonite_voice_ops = { 135static const struct snd_soc_ops zylonite_voice_ops = {
136 .hw_params = zylonite_voice_hw_params, 136 .hw_params = zylonite_voice_hw_params,
137}; 137};
138 138
diff --git a/sound/soc/qcom/lpass-apq8016.c b/sound/soc/qcom/lpass-apq8016.c
index 8aed72be3224..8a74844d99e2 100644
--- a/sound/soc/qcom/lpass-apq8016.c
+++ b/sound/soc/qcom/lpass-apq8016.c
@@ -231,6 +231,18 @@ static struct lpass_variant apq8016_data = {
231 .wrdma_channels = 2, 231 .wrdma_channels = 2,
232 .dai_driver = apq8016_lpass_cpu_dai_driver, 232 .dai_driver = apq8016_lpass_cpu_dai_driver,
233 .num_dai = ARRAY_SIZE(apq8016_lpass_cpu_dai_driver), 233 .num_dai = ARRAY_SIZE(apq8016_lpass_cpu_dai_driver),
234 .dai_osr_clk_names = (const char *[]) {
235 "mi2s-osr-clk0",
236 "mi2s-osr-clk1",
237 "mi2s-osr-clk2",
238 "mi2s-osr-clk3",
239 },
240 .dai_bit_clk_names = (const char *[]) {
241 "mi2s-bit-clk0",
242 "mi2s-bit-clk1",
243 "mi2s-bit-clk2",
244 "mi2s-bit-clk3",
245 },
234 .init = apq8016_lpass_init, 246 .init = apq8016_lpass_init,
235 .exit = apq8016_lpass_exit, 247 .exit = apq8016_lpass_exit,
236 .alloc_dma_channel = apq8016_lpass_alloc_dma_channel, 248 .alloc_dma_channel = apq8016_lpass_alloc_dma_channel,
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 5202a584e0c6..292b103abada 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -429,7 +429,6 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
429 struct lpass_variant *variant; 429 struct lpass_variant *variant;
430 struct device *dev = &pdev->dev; 430 struct device *dev = &pdev->dev;
431 const struct of_device_id *match; 431 const struct of_device_id *match;
432 char clk_name[16];
433 int ret, i, dai_id; 432 int ret, i, dai_id;
434 433
435 dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0); 434 dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
@@ -477,31 +476,24 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
477 476
478 for (i = 0; i < variant->num_dai; i++) { 477 for (i = 0; i < variant->num_dai; i++) {
479 dai_id = variant->dai_driver[i].id; 478 dai_id = variant->dai_driver[i].id;
480 if (variant->num_dai > 1)
481 sprintf(clk_name, "mi2s-osr-clk%d", i);
482 else
483 sprintf(clk_name, "mi2s-osr-clk");
484
485 drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(&pdev->dev, 479 drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(&pdev->dev,
486 clk_name); 480 variant->dai_osr_clk_names[i]);
487 if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) { 481 if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
488 dev_warn(&pdev->dev, 482 dev_warn(&pdev->dev,
489 "error getting optional mi2s-osr-clk: %ld\n", 483 "%s() error getting optional %s: %ld\n",
484 __func__,
485 variant->dai_osr_clk_names[i],
490 PTR_ERR(drvdata->mi2s_osr_clk[dai_id])); 486 PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
491 487
492 drvdata->mi2s_osr_clk[dai_id] = NULL; 488 drvdata->mi2s_osr_clk[dai_id] = NULL;
493 } 489 }
494 490
495 if (variant->num_dai > 1)
496 sprintf(clk_name, "mi2s-bit-clk%d", i);
497 else
498 sprintf(clk_name, "mi2s-bit-clk");
499
500 drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(&pdev->dev, 491 drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(&pdev->dev,
501 clk_name); 492 variant->dai_bit_clk_names[i]);
502 if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) { 493 if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
503 dev_err(&pdev->dev, 494 dev_err(&pdev->dev,
504 "error getting mi2s-bit-clk: %ld\n", 495 "error getting %s: %ld\n",
496 variant->dai_bit_clk_names[i],
505 PTR_ERR(drvdata->mi2s_bit_clk[dai_id])); 497 PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
506 return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]); 498 return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
507 } 499 }
diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c
index 608c1a92af8a..ca1e1f2d2787 100644
--- a/sound/soc/qcom/lpass-ipq806x.c
+++ b/sound/soc/qcom/lpass-ipq806x.c
@@ -92,6 +92,12 @@ static struct lpass_variant ipq806x_data = {
92 .wrdma_channels = 4, 92 .wrdma_channels = 4,
93 .dai_driver = &ipq806x_lpass_cpu_dai_driver, 93 .dai_driver = &ipq806x_lpass_cpu_dai_driver,
94 .num_dai = 1, 94 .num_dai = 1,
95 .dai_osr_clk_names = (const char *[]) {
96 "mi2s-osr-clk",
97 },
98 .dai_bit_clk_names = (const char *[]) {
99 "mi2s-bit-clk",
100 },
95 .alloc_dma_channel = ipq806x_lpass_alloc_dma_channel, 101 .alloc_dma_channel = ipq806x_lpass_alloc_dma_channel,
96 .free_dma_channel = ipq806x_lpass_free_dma_channel, 102 .free_dma_channel = ipq806x_lpass_free_dma_channel,
97}; 103};
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 9b031352ea3c..b848db2d6c3d 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -91,6 +91,8 @@ struct lpass_variant {
91 /* SOC specific dais */ 91 /* SOC specific dais */
92 struct snd_soc_dai_driver *dai_driver; 92 struct snd_soc_dai_driver *dai_driver;
93 int num_dai; 93 int num_dai;
94 const char * const *dai_osr_clk_names;
95 const char * const *dai_bit_clk_names;
94}; 96};
95 97
96/* register the platform driver from the CPU DAI driver */ 98/* register the platform driver from the CPU DAI driver */
diff --git a/sound/soc/rockchip/rk3288_hdmi_analog.c b/sound/soc/rockchip/rk3288_hdmi_analog.c
index b60abf322ce1..dbc53e48c52c 100644
--- a/sound/soc/rockchip/rk3288_hdmi_analog.c
+++ b/sound/soc/rockchip/rk3288_hdmi_analog.c
@@ -93,6 +93,9 @@ static int rk_hw_params(struct snd_pcm_substream *substream,
93 case 96000: 93 case 96000:
94 mclk = 12288000; 94 mclk = 12288000;
95 break; 95 break;
96 case 192000:
97 mclk = 24576000;
98 break;
96 case 11025: 99 case 11025:
97 case 22050: 100 case 22050:
98 case 44100: 101 case 44100:
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index f1f1d7959a1b..0520f5afd7cc 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -185,6 +185,14 @@ config SND_SOC_SNOW
185 Say Y if you want to add audio support for various Snow 185 Say Y if you want to add audio support for various Snow
186 boards based on Exynos5 series of SoCs. 186 boards based on Exynos5 series of SoCs.
187 187
188config SND_SOC_ODROID
189 tristate "Audio support for Odroid XU3/XU4"
190 depends on SND_SOC_SAMSUNG && I2C
191 select SND_SOC_MAX98090
192 select SND_SAMSUNG_I2S
193 help
194 Say Y here to enable audio support for the Odroid XU3/XU4.
195
188config SND_SOC_ARNDALE_RT5631_ALC5631 196config SND_SOC_ARNDALE_RT5631_ALC5631
189 tristate "Audio support for RT5631(ALC5631) on Arndale Board" 197 tristate "Audio support for RT5631(ALC5631) on Arndale Board"
190 depends on I2C 198 depends on I2C
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index b5df5e2e3d94..b6c2ee358333 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -40,6 +40,7 @@ snd-soc-tobermory-objs := tobermory.o
40snd-soc-lowland-objs := lowland.o 40snd-soc-lowland-objs := lowland.o
41snd-soc-littlemill-objs := littlemill.o 41snd-soc-littlemill-objs := littlemill.o
42snd-soc-bells-objs := bells.o 42snd-soc-bells-objs := bells.o
43snd-soc-odroid-objs := odroid.o
43snd-soc-arndale-rt5631-objs := arndale_rt5631.o 44snd-soc-arndale-rt5631-objs := arndale_rt5631.o
44snd-soc-tm2-wm5110-objs := tm2_wm5110.o 45snd-soc-tm2-wm5110-objs := tm2_wm5110.o
45 46
@@ -62,5 +63,6 @@ obj-$(CONFIG_SND_SOC_TOBERMORY) += snd-soc-tobermory.o
62obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o 63obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
63obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o 64obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
64obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o 65obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
66obj-$(CONFIG_SND_SOC_ODROID) += snd-soc-odroid.o
65obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o 67obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
66obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o 68obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 3dd246fa0059..34deba461ae1 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -446,7 +446,6 @@ static struct snd_soc_card bells_cards[] = {
446 }, 446 },
447}; 447};
448 448
449
450static int bells_probe(struct platform_device *pdev) 449static int bells_probe(struct platform_device *pdev)
451{ 450{
452 int ret; 451 int ret;
diff --git a/sound/soc/samsung/i2s-regs.h b/sound/soc/samsung/i2s-regs.h
index 9170c311d66e..fe6914005494 100644
--- a/sound/soc/samsung/i2s-regs.h
+++ b/sound/soc/samsung/i2s-regs.h
@@ -160,5 +160,3 @@
160#define I2SSIZE_SHIFT (16) 160#define I2SSIZE_SHIFT (16)
161 161
162#endif /* __SND_SOC_SAMSUNG_I2S_REGS_H */ 162#endif /* __SND_SOC_SAMSUNG_I2S_REGS_H */
163
164
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 52a47ed292a4..af3ba4d4ccc5 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1242,7 +1242,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
1242 i2s_dai_data = (struct samsung_i2s_dai_data *) 1242 i2s_dai_data = (struct samsung_i2s_dai_data *)
1243 platform_get_device_id(pdev)->driver_data; 1243 platform_get_device_id(pdev)->driver_data;
1244 1244
1245
1246 pri_dai = i2s_alloc_dai(pdev, false); 1245 pri_dai = i2s_alloc_dai(pdev, false);
1247 if (!pri_dai) { 1246 if (!pri_dai) {
1248 dev_err(&pdev->dev, "Unable to alloc I2S_pri\n"); 1247 dev_err(&pdev->dev, "Unable to alloc I2S_pri\n");
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
new file mode 100644
index 000000000000..0c0b00e40646
--- /dev/null
+++ b/sound/soc/samsung/odroid.c
@@ -0,0 +1,219 @@
1/*
2 * Copyright (C) 2017 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/clk.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/module.h>
13#include <sound/soc.h>
14#include <sound/pcm_params.h>
15#include "i2s.h"
16#include "i2s-regs.h"
17
18struct odroid_priv {
19 struct snd_soc_card card;
20 struct snd_soc_dai_link dai_link;
21
22 struct clk *pll;
23 struct clk *rclk;
24};
25
26static int odroid_card_startup(struct snd_pcm_substream *substream)
27{
28 struct snd_pcm_runtime *runtime = substream->runtime;
29
30 snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
31 return 0;
32}
33
34static int odroid_card_hw_params(struct snd_pcm_substream *substream,
35 struct snd_pcm_hw_params *params)
36{
37 struct snd_soc_pcm_runtime *rtd = substream->private_data;
38 struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
39 unsigned int pll_freq, rclk_freq;
40 int ret;
41
42 switch (params_rate(params)) {
43 case 32000:
44 case 64000:
45 pll_freq = 131072000U;
46 break;
47 case 44100:
48 case 88200:
49 case 176400:
50 pll_freq = 180633600U;
51 break;
52 case 48000:
53 case 96000:
54 case 192000:
55 pll_freq = 196608000U;
56 break;
57 default:
58 return -EINVAL;
59 }
60
61 ret = clk_set_rate(priv->pll, pll_freq + 1);
62 if (ret < 0)
63 return ret;
64
65 rclk_freq = params_rate(params) * 256 * 4;
66
67 ret = clk_set_rate(priv->rclk, rclk_freq);
68 if (ret < 0)
69 return ret;
70
71 if (rtd->num_codecs > 1) {
72 struct snd_soc_dai *codec_dai = rtd->codec_dais[1];
73
74 ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk_freq,
75 SND_SOC_CLOCK_IN);
76 if (ret < 0)
77 return ret;
78 }
79
80 return 0;
81}
82
83static const struct snd_soc_ops odroid_card_ops = {
84 .startup = odroid_card_startup,
85 .hw_params = odroid_card_hw_params,
86};
87
88static void odroid_put_codec_of_nodes(struct snd_soc_dai_link *link)
89{
90 struct snd_soc_dai_link_component *component = link->codecs;
91 int i;
92
93 for (i = 0; i < link->num_codecs; i++, component++) {
94 if (!component->of_node)
95 break;
96 of_node_put(component->of_node);
97 }
98}
99
100static int odroid_audio_probe(struct platform_device *pdev)
101{
102 struct device *dev = &pdev->dev;
103 struct device_node *cpu, *codec;
104 struct odroid_priv *priv;
105 struct snd_soc_dai_link *link;
106 struct snd_soc_card *card;
107 int ret;
108
109 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
110 if (!priv)
111 return -ENOMEM;
112
113 card = &priv->card;
114 card->dev = dev;
115
116 card->owner = THIS_MODULE;
117 card->fully_routed = true;
118
119 snd_soc_card_set_drvdata(card, priv);
120
121 priv->pll = devm_clk_get(dev, "epll");
122 if (IS_ERR(priv->pll))
123 return PTR_ERR(priv->pll);
124
125 priv->rclk = devm_clk_get(dev, "i2s_rclk");
126 if (IS_ERR(priv->rclk))
127 return PTR_ERR(priv->rclk);
128
129 ret = snd_soc_of_parse_card_name(card, "model");
130 if (ret < 0)
131 return ret;
132
133 if (of_property_read_bool(dev->of_node, "samsung,audio-widgets")) {
134 ret = snd_soc_of_parse_audio_simple_widgets(card,
135 "samsung,audio-widgets");
136 if (ret < 0)
137 return ret;
138 }
139
140 if (of_property_read_bool(dev->of_node, "samsung,audio-routing")) {
141 ret = snd_soc_of_parse_audio_routing(card,
142 "samsung,audio-routing");
143 if (ret < 0)
144 return ret;
145 }
146
147 link = &priv->dai_link;
148
149 link->ops = &odroid_card_ops;
150 link->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
151 SND_SOC_DAIFMT_CBS_CFS;
152
153 card->dai_link = &priv->dai_link;
154 card->num_links = 1;
155
156 cpu = of_get_child_by_name(dev->of_node, "cpu");
157 codec = of_get_child_by_name(dev->of_node, "codec");
158
159 link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
160 if (!link->cpu_of_node) {
161 dev_err(dev, "Failed parsing cpu/sound-dai property\n");
162 return -EINVAL;
163 }
164
165 ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
166 if (ret < 0)
167 goto err_put_codec_n;
168
169 link->platform_of_node = link->cpu_of_node;
170
171 link->name = "Primary";
172 link->stream_name = link->name;
173
174 ret = devm_snd_soc_register_card(dev, card);
175 if (ret < 0) {
176 dev_err(dev, "snd_soc_register_card() failed: %d\n", ret);
177 goto err_put_i2s_n;
178 }
179
180 return 0;
181
182err_put_i2s_n:
183 of_node_put(link->cpu_of_node);
184err_put_codec_n:
185 odroid_put_codec_of_nodes(link);
186 return ret;
187}
188
189static int odroid_audio_remove(struct platform_device *pdev)
190{
191 struct odroid_priv *priv = platform_get_drvdata(pdev);
192
193 of_node_put(priv->dai_link.cpu_of_node);
194 odroid_put_codec_of_nodes(&priv->dai_link);
195
196 return 0;
197}
198
199static const struct of_device_id odroid_audio_of_match[] = {
200 { .compatible = "samsung,odroid-xu3-audio" },
201 { .compatible = "samsung,odroid-xu4-audio"},
202 { },
203};
204MODULE_DEVICE_TABLE(of, odroid_audio_of_match);
205
206static struct platform_driver odroid_audio_driver = {
207 .driver = {
208 .name = "odroid-audio",
209 .of_match_table = odroid_audio_of_match,
210 .pm = &snd_soc_pm_ops,
211 },
212 .probe = odroid_audio_probe,
213 .remove = odroid_audio_remove,
214};
215module_platform_driver(odroid_audio_driver);
216
217MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
218MODULE_DESCRIPTION("Odroid XU3/XU4 audio support");
219MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index 644f186fd35c..8f42deaa184b 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -72,7 +72,6 @@ static inline void dbg_showcon(const char *fn, u32 con)
72} 72}
73#endif 73#endif
74 74
75
76/* Turn on or off the transmission path. */ 75/* Turn on or off the transmission path. */
77static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on) 76static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
78{ 77{
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 85a33ac0a5c4..66203d107a11 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -43,6 +43,7 @@ struct rsnd_adg {
43}; 43};
44 44
45#define LRCLK_ASYNC (1 << 0) 45#define LRCLK_ASYNC (1 << 0)
46#define AUDIO_OUT_48 (1 << 1)
46#define adg_mode_flags(adg) (adg->flags) 47#define adg_mode_flags(adg) (adg->flags)
47 48
48#define for_each_rsnd_clk(pos, adg, i) \ 49#define for_each_rsnd_clk(pos, adg, i) \
@@ -364,7 +365,10 @@ found_clock:
364 365
365 rsnd_adg_set_ssi_clk(ssi_mod, data); 366 rsnd_adg_set_ssi_clk(ssi_mod, data);
366 367
367 if (!(adg_mode_flags(adg) & LRCLK_ASYNC)) { 368 if (adg_mode_flags(adg) & LRCLK_ASYNC) {
369 if (adg_mode_flags(adg) & AUDIO_OUT_48)
370 ckr = 0x80000000;
371 } else {
368 if (0 == (rate % 8000)) 372 if (0 == (rate % 8000))
369 ckr = 0x80000000; 373 ckr = 0x80000000;
370 } 374 }
@@ -427,11 +431,14 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
427 struct clk *clk; 431 struct clk *clk;
428 struct device *dev = rsnd_priv_to_dev(priv); 432 struct device *dev = rsnd_priv_to_dev(priv);
429 struct device_node *np = dev->of_node; 433 struct device_node *np = dev->of_node;
434 struct property *prop;
430 u32 ckr, rbgx, rbga, rbgb; 435 u32 ckr, rbgx, rbga, rbgb;
431 u32 rate, req_rate = 0, div; 436 u32 rate, div;
437#define REQ_SIZE 2
438 u32 req_rate[REQ_SIZE] = {};
432 uint32_t count = 0; 439 uint32_t count = 0;
433 unsigned long req_48kHz_rate, req_441kHz_rate; 440 unsigned long req_48kHz_rate, req_441kHz_rate;
434 int i; 441 int i, req_size;
435 const char *parent_clk_name = NULL; 442 const char *parent_clk_name = NULL;
436 static const char * const clkout_name[] = { 443 static const char * const clkout_name[] = {
437 [CLKOUT] = "audio_clkout", 444 [CLKOUT] = "audio_clkout",
@@ -446,19 +453,32 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
446 [CLKI] = 0x2, 453 [CLKI] = 0x2,
447 }; 454 };
448 455
449 of_property_read_u32(np, "#clock-cells", &count); 456 ckr = 0;
457 rbga = 2; /* default 1/6 */
458 rbgb = 2; /* default 1/6 */
450 459
451 /* 460 /*
452 * ADG supports BRRA/BRRB output only 461 * ADG supports BRRA/BRRB output only
453 * this means all clkout0/1/2/3 will be same rate 462 * this means all clkout0/1/2/3 will be same rate
454 */ 463 */
455 of_property_read_u32(np, "clock-frequency", &req_rate); 464 prop = of_find_property(np, "clock-frequency", NULL);
465 if (!prop)
466 goto rsnd_adg_get_clkout_end;
467
468 req_size = prop->length / sizeof(u32);
469
470 of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
456 req_48kHz_rate = 0; 471 req_48kHz_rate = 0;
457 req_441kHz_rate = 0; 472 req_441kHz_rate = 0;
458 if (0 == (req_rate % 44100)) 473 for (i = 0; i < req_size; i++) {
459 req_441kHz_rate = req_rate; 474 if (0 == (req_rate[i] % 44100))
460 if (0 == (req_rate % 48000)) 475 req_441kHz_rate = req_rate[i];
461 req_48kHz_rate = req_rate; 476 if (0 == (req_rate[i] % 48000))
477 req_48kHz_rate = req_rate[i];
478 }
479
480 if (req_rate[0] % 48000 == 0)
481 adg->flags = AUDIO_OUT_48;
462 482
463 /* 483 /*
464 * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC 484 * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC
@@ -469,9 +489,6 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
469 * rsnd_adg_ssi_clk_try_start() 489 * rsnd_adg_ssi_clk_try_start()
470 * rsnd_ssi_master_clk_start() 490 * rsnd_ssi_master_clk_start()
471 */ 491 */
472 ckr = 0;
473 rbga = 2; /* default 1/6 */
474 rbgb = 2; /* default 1/6 */
475 adg->rbga_rate_for_441khz = 0; 492 adg->rbga_rate_for_441khz = 0;
476 adg->rbgb_rate_for_48khz = 0; 493 adg->rbgb_rate_for_48khz = 0;
477 for_each_rsnd_clk(clk, adg, i) { 494 for_each_rsnd_clk(clk, adg, i) {
@@ -505,10 +522,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
505 rbgb = rbgx; 522 rbgb = rbgx;
506 adg->rbgb_rate_for_48khz = rate / div; 523 adg->rbgb_rate_for_48khz = rate / div;
507 ckr |= brg_table[i] << 16; 524 ckr |= brg_table[i] << 16;
508 if (req_48kHz_rate) { 525 if (req_48kHz_rate)
509 parent_clk_name = __clk_get_name(clk); 526 parent_clk_name = __clk_get_name(clk);
510 ckr |= 0x80000000;
511 }
512 } 527 }
513 } 528 }
514 } 529 }
@@ -518,12 +533,13 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
518 * this means all clkout0/1/2/3 will be * same rate 533 * this means all clkout0/1/2/3 will be * same rate
519 */ 534 */
520 535
536 of_property_read_u32(np, "#clock-cells", &count);
521 /* 537 /*
522 * for clkout 538 * for clkout
523 */ 539 */
524 if (!count) { 540 if (!count) {
525 clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT], 541 clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
526 parent_clk_name, 0, req_rate); 542 parent_clk_name, 0, req_rate[0]);
527 if (!IS_ERR(clk)) { 543 if (!IS_ERR(clk)) {
528 adg->clkout[CLKOUT] = clk; 544 adg->clkout[CLKOUT] = clk;
529 of_clk_add_provider(np, of_clk_src_simple_get, clk); 545 of_clk_add_provider(np, of_clk_src_simple_get, clk);
@@ -536,19 +552,18 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
536 for (i = 0; i < CLKOUTMAX; i++) { 552 for (i = 0; i < CLKOUTMAX; i++) {
537 clk = clk_register_fixed_rate(dev, clkout_name[i], 553 clk = clk_register_fixed_rate(dev, clkout_name[i],
538 parent_clk_name, 0, 554 parent_clk_name, 0,
539 req_rate); 555 req_rate[0]);
540 if (!IS_ERR(clk)) { 556 adg->clkout[i] = ERR_PTR(-ENOENT);
541 adg->onecell.clks = adg->clkout; 557 if (!IS_ERR(clk))
542 adg->onecell.clk_num = CLKOUTMAX;
543
544 adg->clkout[i] = clk; 558 adg->clkout[i] = clk;
545
546 of_clk_add_provider(np, of_clk_src_onecell_get,
547 &adg->onecell);
548 }
549 } 559 }
560 adg->onecell.clks = adg->clkout;
561 adg->onecell.clk_num = CLKOUTMAX;
562 of_clk_add_provider(np, of_clk_src_onecell_get,
563 &adg->onecell);
550 } 564 }
551 565
566rsnd_adg_get_clkout_end:
552 adg->ckr = ckr; 567 adg->ckr = ckr;
553 adg->rbga = rbga; 568 adg->rbga = rbga;
554 adg->rbgb = rbgb; 569 adg->rbgb = rbgb;
@@ -564,6 +579,7 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
564 struct rsnd_adg *adg; 579 struct rsnd_adg *adg;
565 struct device *dev = rsnd_priv_to_dev(priv); 580 struct device *dev = rsnd_priv_to_dev(priv);
566 struct device_node *np = dev->of_node; 581 struct device_node *np = dev->of_node;
582 int ret;
567 583
568 adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL); 584 adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
569 if (!adg) { 585 if (!adg) {
@@ -571,8 +587,10 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
571 return -ENOMEM; 587 return -ENOMEM;
572 } 588 }
573 589
574 rsnd_mod_init(priv, &adg->mod, &adg_ops, 590 ret = rsnd_mod_init(priv, &adg->mod, &adg_ops,
575 NULL, NULL, 0, 0); 591 NULL, NULL, 0, 0);
592 if (ret)
593 return ret;
576 594
577 rsnd_adg_get_clkin(priv, adg); 595 rsnd_adg_get_clkin(priv, adg);
578 rsnd_adg_get_clkout(priv, adg); 596 rsnd_adg_get_clkout(priv, adg);
@@ -589,5 +607,10 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
589 607
590void rsnd_adg_remove(struct rsnd_priv *priv) 608void rsnd_adg_remove(struct rsnd_priv *priv)
591{ 609{
610 struct device *dev = rsnd_priv_to_dev(priv);
611 struct device_node *np = dev->of_node;
612
613 of_clk_del_provider(np);
614
592 rsnd_adg_clk_disable(priv); 615 rsnd_adg_clk_disable(priv);
593} 616}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 47b370cb2d3b..1744015408c3 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -96,7 +96,7 @@
96#include <linux/pm_runtime.h> 96#include <linux/pm_runtime.h>
97#include "rsnd.h" 97#include "rsnd.h"
98 98
99#define RSND_RATES SNDRV_PCM_RATE_8000_96000 99#define RSND_RATES SNDRV_PCM_RATE_8000_192000
100#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE) 100#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
101 101
102static const struct of_device_id rsnd_of_match[] = { 102static const struct of_device_id rsnd_of_match[] = {
@@ -110,7 +110,6 @@ MODULE_DEVICE_TABLE(of, rsnd_of_match);
110/* 110/*
111 * rsnd_mod functions 111 * rsnd_mod functions
112 */ 112 */
113#ifdef DEBUG
114void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type) 113void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
115{ 114{
116 if (mod->type != type) { 115 if (mod->type != type) {
@@ -121,7 +120,6 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
121 rsnd_mod_name(mod), rsnd_mod_id(mod)); 120 rsnd_mod_name(mod), rsnd_mod_id(mod));
122 } 121 }
123} 122}
124#endif
125 123
126char *rsnd_mod_name(struct rsnd_mod *mod) 124char *rsnd_mod_name(struct rsnd_mod *mod)
127{ 125{
@@ -674,12 +672,10 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
674 /* set clock inversion */ 672 /* set clock inversion */
675 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 673 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
676 case SND_SOC_DAIFMT_NB_IF: 674 case SND_SOC_DAIFMT_NB_IF:
677 rdai->bit_clk_inv = rdai->bit_clk_inv;
678 rdai->frm_clk_inv = !rdai->frm_clk_inv; 675 rdai->frm_clk_inv = !rdai->frm_clk_inv;
679 break; 676 break;
680 case SND_SOC_DAIFMT_IB_NF: 677 case SND_SOC_DAIFMT_IB_NF:
681 rdai->bit_clk_inv = !rdai->bit_clk_inv; 678 rdai->bit_clk_inv = !rdai->bit_clk_inv;
682 rdai->frm_clk_inv = rdai->frm_clk_inv;
683 break; 679 break;
684 case SND_SOC_DAIFMT_IB_IF: 680 case SND_SOC_DAIFMT_IB_IF:
685 rdai->bit_clk_inv = !rdai->bit_clk_inv; 681 rdai->bit_clk_inv = !rdai->bit_clk_inv;
@@ -1002,13 +998,30 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl,
1002 return change; 998 return change;
1003} 999}
1004 1000
1005static int __rsnd_kctrl_new(struct rsnd_mod *mod, 1001struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg)
1006 struct rsnd_dai_stream *io, 1002{
1007 struct snd_soc_pcm_runtime *rtd, 1003 cfg->cfg.val = cfg->val;
1008 const unsigned char *name, 1004
1009 struct rsnd_kctrl_cfg *cfg, 1005 return &cfg->cfg;
1010 void (*update)(struct rsnd_dai_stream *io, 1006}
1011 struct rsnd_mod *mod)) 1007
1008struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg)
1009{
1010 cfg->cfg.val = &cfg->val;
1011
1012 return &cfg->cfg;
1013}
1014
1015int rsnd_kctrl_new(struct rsnd_mod *mod,
1016 struct rsnd_dai_stream *io,
1017 struct snd_soc_pcm_runtime *rtd,
1018 const unsigned char *name,
1019 void (*update)(struct rsnd_dai_stream *io,
1020 struct rsnd_mod *mod),
1021 struct rsnd_kctrl_cfg *cfg,
1022 const char * const *texts,
1023 int size,
1024 u32 max)
1012{ 1025{
1013 struct snd_card *card = rtd->card->snd_card; 1026 struct snd_card *card = rtd->card->snd_card;
1014 struct snd_kcontrol *kctrl; 1027 struct snd_kcontrol *kctrl;
@@ -1023,6 +1036,9 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
1023 }; 1036 };
1024 int ret; 1037 int ret;
1025 1038
1039 if (size > RSND_MAX_CHANNELS)
1040 return -EINVAL;
1041
1026 kctrl = snd_ctl_new1(&knew, mod); 1042 kctrl = snd_ctl_new1(&knew, mod);
1027 if (!kctrl) 1043 if (!kctrl)
1028 return -ENOMEM; 1044 return -ENOMEM;
@@ -1031,74 +1047,17 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
1031 if (ret < 0) 1047 if (ret < 0)
1032 return ret; 1048 return ret;
1033 1049
1034 cfg->update = update; 1050 cfg->texts = texts;
1035 cfg->card = card; 1051 cfg->max = max;
1036 cfg->kctrl = kctrl; 1052 cfg->size = size;
1037 cfg->io = io; 1053 cfg->update = update;
1054 cfg->card = card;
1055 cfg->kctrl = kctrl;
1056 cfg->io = io;
1038 1057
1039 return 0; 1058 return 0;
1040} 1059}
1041 1060
1042void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg)
1043{
1044 if (cfg->card && cfg->kctrl)
1045 snd_ctl_remove(cfg->card, cfg->kctrl);
1046
1047 cfg->card = NULL;
1048 cfg->kctrl = NULL;
1049}
1050
1051int rsnd_kctrl_new_m(struct rsnd_mod *mod,
1052 struct rsnd_dai_stream *io,
1053 struct snd_soc_pcm_runtime *rtd,
1054 const unsigned char *name,
1055 void (*update)(struct rsnd_dai_stream *io,
1056 struct rsnd_mod *mod),
1057 struct rsnd_kctrl_cfg_m *_cfg,
1058 int ch_size,
1059 u32 max)
1060{
1061 if (ch_size > RSND_MAX_CHANNELS)
1062 return -EINVAL;
1063
1064 _cfg->cfg.max = max;
1065 _cfg->cfg.size = ch_size;
1066 _cfg->cfg.val = _cfg->val;
1067 return __rsnd_kctrl_new(mod, io, rtd, name, &_cfg->cfg, update);
1068}
1069
1070int rsnd_kctrl_new_s(struct rsnd_mod *mod,
1071 struct rsnd_dai_stream *io,
1072 struct snd_soc_pcm_runtime *rtd,
1073 const unsigned char *name,
1074 void (*update)(struct rsnd_dai_stream *io,
1075 struct rsnd_mod *mod),
1076 struct rsnd_kctrl_cfg_s *_cfg,
1077 u32 max)
1078{
1079 _cfg->cfg.max = max;
1080 _cfg->cfg.size = 1;
1081 _cfg->cfg.val = &_cfg->val;
1082 return __rsnd_kctrl_new(mod, io, rtd, name, &_cfg->cfg, update);
1083}
1084
1085int rsnd_kctrl_new_e(struct rsnd_mod *mod,
1086 struct rsnd_dai_stream *io,
1087 struct snd_soc_pcm_runtime *rtd,
1088 const unsigned char *name,
1089 struct rsnd_kctrl_cfg_s *_cfg,
1090 void (*update)(struct rsnd_dai_stream *io,
1091 struct rsnd_mod *mod),
1092 const char * const *texts,
1093 u32 max)
1094{
1095 _cfg->cfg.max = max;
1096 _cfg->cfg.size = 1;
1097 _cfg->cfg.val = &_cfg->val;
1098 _cfg->cfg.texts = texts;
1099 return __rsnd_kctrl_new(mod, io, rtd, name, &_cfg->cfg, update);
1100}
1101
1102/* 1061/*
1103 * snd_soc_platform 1062 * snd_soc_platform
1104 */ 1063 */
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index cf8f59cdd8d7..463de8360985 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -218,21 +218,6 @@ static int rsnd_dvc_probe_(struct rsnd_mod *mod,
218 return rsnd_cmd_attach(io, rsnd_mod_id(mod)); 218 return rsnd_cmd_attach(io, rsnd_mod_id(mod));
219} 219}
220 220
221static int rsnd_dvc_remove_(struct rsnd_mod *mod,
222 struct rsnd_dai_stream *io,
223 struct rsnd_priv *priv)
224{
225 struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
226
227 rsnd_kctrl_remove(dvc->volume);
228 rsnd_kctrl_remove(dvc->mute);
229 rsnd_kctrl_remove(dvc->ren);
230 rsnd_kctrl_remove(dvc->rup);
231 rsnd_kctrl_remove(dvc->rdown);
232
233 return 0;
234}
235
236static int rsnd_dvc_init(struct rsnd_mod *mod, 221static int rsnd_dvc_init(struct rsnd_mod *mod,
237 struct rsnd_dai_stream *io, 222 struct rsnd_dai_stream *io,
238 struct rsnd_priv *priv) 223 struct rsnd_priv *priv)
@@ -300,18 +285,18 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
300 ret = rsnd_kctrl_new_e(mod, io, rtd, 285 ret = rsnd_kctrl_new_e(mod, io, rtd,
301 is_play ? 286 is_play ?
302 "DVC Out Ramp Up Rate" : "DVC In Ramp Up Rate", 287 "DVC Out Ramp Up Rate" : "DVC In Ramp Up Rate",
303 &dvc->rup,
304 rsnd_dvc_volume_update, 288 rsnd_dvc_volume_update,
305 dvc_ramp_rate, ARRAY_SIZE(dvc_ramp_rate)); 289 &dvc->rup,
290 dvc_ramp_rate);
306 if (ret < 0) 291 if (ret < 0)
307 return ret; 292 return ret;
308 293
309 ret = rsnd_kctrl_new_e(mod, io, rtd, 294 ret = rsnd_kctrl_new_e(mod, io, rtd,
310 is_play ? 295 is_play ?
311 "DVC Out Ramp Down Rate" : "DVC In Ramp Down Rate", 296 "DVC Out Ramp Down Rate" : "DVC In Ramp Down Rate",
312 &dvc->rdown,
313 rsnd_dvc_volume_update, 297 rsnd_dvc_volume_update,
314 dvc_ramp_rate, ARRAY_SIZE(dvc_ramp_rate)); 298 &dvc->rdown,
299 dvc_ramp_rate);
315 300
316 if (ret < 0) 301 if (ret < 0)
317 return ret; 302 return ret;
@@ -332,7 +317,6 @@ static struct rsnd_mod_ops rsnd_dvc_ops = {
332 .name = DVC_NAME, 317 .name = DVC_NAME,
333 .dma_req = rsnd_dvc_dma_req, 318 .dma_req = rsnd_dvc_dma_req,
334 .probe = rsnd_dvc_probe_, 319 .probe = rsnd_dvc_probe_,
335 .remove = rsnd_dvc_remove_,
336 .init = rsnd_dvc_init, 320 .init = rsnd_dvc_init,
337 .quit = rsnd_dvc_quit, 321 .quit = rsnd_dvc_quit,
338 .pcm_new = rsnd_dvc_pcm_new, 322 .pcm_new = rsnd_dvc_pcm_new,
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 7410ec0174db..dbf4163427e8 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -611,35 +611,30 @@ struct rsnd_kctrl_cfg_s {
611 u32 val; 611 u32 val;
612}; 612};
613 613
614void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg); 614struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg);
615#define rsnd_kctrl_remove(_cfg) _rsnd_kctrl_remove(&((_cfg).cfg)) 615struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg);
616 616int rsnd_kctrl_new(struct rsnd_mod *mod,
617int rsnd_kctrl_new_m(struct rsnd_mod *mod, 617 struct rsnd_dai_stream *io,
618 struct rsnd_dai_stream *io, 618 struct snd_soc_pcm_runtime *rtd,
619 struct snd_soc_pcm_runtime *rtd, 619 const unsigned char *name,
620 const unsigned char *name, 620 void (*update)(struct rsnd_dai_stream *io,
621 void (*update)(struct rsnd_dai_stream *io, 621 struct rsnd_mod *mod),
622 struct rsnd_mod *mod), 622 struct rsnd_kctrl_cfg *cfg,
623 struct rsnd_kctrl_cfg_m *_cfg, 623 const char * const *texts,
624 int ch_size, 624 int size,
625 u32 max); 625 u32 max);
626int rsnd_kctrl_new_s(struct rsnd_mod *mod, 626
627 struct rsnd_dai_stream *io, 627#define rsnd_kctrl_new_m(mod, io, rtd, name, update, cfg, size, max) \
628 struct snd_soc_pcm_runtime *rtd, 628 rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_m(cfg), \
629 const unsigned char *name, 629 NULL, size, max)
630 void (*update)(struct rsnd_dai_stream *io, 630
631 struct rsnd_mod *mod), 631#define rsnd_kctrl_new_s(mod, io, rtd, name, update, cfg, max) \
632 struct rsnd_kctrl_cfg_s *_cfg, 632 rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_s(cfg), \
633 u32 max); 633 NULL, 1, max)
634int rsnd_kctrl_new_e(struct rsnd_mod *mod, 634
635 struct rsnd_dai_stream *io, 635#define rsnd_kctrl_new_e(mod, io, rtd, name, update, cfg, texts) \
636 struct snd_soc_pcm_runtime *rtd, 636 rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_s(cfg), \
637 const unsigned char *name, 637 texts, 1, ARRAY_SIZE(texts))
638 struct rsnd_kctrl_cfg_s *_cfg,
639 void (*update)(struct rsnd_dai_stream *io,
640 struct rsnd_mod *mod),
641 const char * const *texts,
642 u32 max);
643 638
644/* 639/*
645 * R-Car SSI 640 * R-Car SSI
@@ -732,8 +727,8 @@ void rsnd_cmd_remove(struct rsnd_priv *priv);
732int rsnd_cmd_attach(struct rsnd_dai_stream *io, int id); 727int rsnd_cmd_attach(struct rsnd_dai_stream *io, int id);
733struct rsnd_mod *rsnd_cmd_mod_get(struct rsnd_priv *priv, int id); 728struct rsnd_mod *rsnd_cmd_mod_get(struct rsnd_priv *priv, int id);
734 729
735#ifdef DEBUG
736void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type); 730void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type);
731#ifdef DEBUG
737#define rsnd_mod_confirm_ssi(mssi) rsnd_mod_make_sure(mssi, RSND_MOD_SSI) 732#define rsnd_mod_confirm_ssi(mssi) rsnd_mod_make_sure(mssi, RSND_MOD_SSI)
738#define rsnd_mod_confirm_src(msrc) rsnd_mod_make_sure(msrc, RSND_MOD_SRC) 733#define rsnd_mod_confirm_src(msrc) rsnd_mod_make_sure(msrc, RSND_MOD_SRC)
739#define rsnd_mod_confirm_dvc(mdvc) rsnd_mod_make_sure(mdvc, RSND_MOD_DVC) 734#define rsnd_mod_confirm_dvc(mdvc) rsnd_mod_make_sure(mdvc, RSND_MOD_DVC)
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 42db48db09ba..20b5b2ec625e 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -167,6 +167,7 @@ static int rsnd_src_hw_params(struct rsnd_mod *mod,
167 * dpcm_fe_dai_hw_params() 167 * dpcm_fe_dai_hw_params()
168 * dpcm_be_dai_hw_params() 168 * dpcm_be_dai_hw_params()
169 */ 169 */
170 src->convert_rate = 0;
170 if (fe->dai_link->dynamic) { 171 if (fe->dai_link->dynamic) {
171 int stream = substream->stream; 172 int stream = substream->stream;
172 struct snd_soc_dpcm *dpcm; 173 struct snd_soc_dpcm *dpcm;
@@ -414,8 +415,6 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
414 415
415 rsnd_mod_power_off(mod); 416 rsnd_mod_power_off(mod);
416 417
417 src->convert_rate = 0;
418
419 /* reset sync convert_rate */ 418 /* reset sync convert_rate */
420 src->sync.val = 0; 419 src->sync.val = 0;
421 420
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 411bda2387ad..135c5669f796 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -228,6 +228,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
228 for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) { 228 for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
229 229
230 /* 230 /*
231 * It will set SSIWSR.CONT here, but SSICR.CKDV = 000
232 * with it is not allowed. (SSIWSR.WS_MODE with
233 * SSICR.CKDV = 000 is not allowed either).
234 * Skip it. See SSICR.CKDV
235 */
236 if (j == 0)
237 continue;
238
239 /*
231 * this driver is assuming that 240 * this driver is assuming that
232 * system word is 32bit x chan 241 * system word is 32bit x chan
233 * see rsnd_ssi_init() 242 * see rsnd_ssi_init()
diff --git a/sound/soc/sirf/sirf-audio-port.c b/sound/soc/sirf/sirf-audio-port.c
index 3f2cce03275c..be066de74aaa 100644
--- a/sound/soc/sirf/sirf-audio-port.c
+++ b/sound/soc/sirf/sirf-audio-port.c
@@ -19,6 +19,7 @@ struct sirf_audio_port {
19static int sirf_audio_port_dai_probe(struct snd_soc_dai *dai) 19static int sirf_audio_port_dai_probe(struct snd_soc_dai *dai)
20{ 20{
21 struct sirf_audio_port *port = snd_soc_dai_get_drvdata(dai); 21 struct sirf_audio_port *port = snd_soc_dai_get_drvdata(dai);
22
22 snd_soc_dai_init_dma_data(dai, &port->playback_dma_data, 23 snd_soc_dai_init_dma_data(dai, &port->playback_dma_data,
23 &port->capture_dma_data); 24 &port->capture_dma_data);
24 return 0; 25 return 0;
diff --git a/sound/soc/sirf/sirf-audio.c b/sound/soc/sirf/sirf-audio.c
index 94ea152e0362..f2bc50790f76 100644
--- a/sound/soc/sirf/sirf-audio.c
+++ b/sound/soc/sirf/sirf-audio.c
@@ -27,6 +27,7 @@ static int sirf_audio_hp_event(struct snd_soc_dapm_widget *w,
27 struct snd_soc_card *card = dapm->card; 27 struct snd_soc_card *card = dapm->card;
28 struct sirf_audio_card *sirf_audio_card = snd_soc_card_get_drvdata(card); 28 struct sirf_audio_card *sirf_audio_card = snd_soc_card_get_drvdata(card);
29 int on = !SND_SOC_DAPM_EVENT_OFF(event); 29 int on = !SND_SOC_DAPM_EVENT_OFF(event);
30
30 if (gpio_is_valid(sirf_audio_card->gpio_hp_pa)) 31 if (gpio_is_valid(sirf_audio_card->gpio_hp_pa))
31 gpio_set_value(sirf_audio_card->gpio_hp_pa, on); 32 gpio_set_value(sirf_audio_card->gpio_hp_pa, on);
32 return 0; 33 return 0;
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 45fc06c0e0e5..77e7dcf969d0 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -71,6 +71,7 @@ static void sirf_usp_rx_disable(struct sirf_usp *usp)
71static int sirf_usp_pcm_dai_probe(struct snd_soc_dai *dai) 71static int sirf_usp_pcm_dai_probe(struct snd_soc_dai *dai)
72{ 72{
73 struct sirf_usp *usp = snd_soc_dai_get_drvdata(dai); 73 struct sirf_usp *usp = snd_soc_dai_get_drvdata(dai);
74
74 snd_soc_dai_init_dma_data(dai, &usp->playback_dma_data, 75 snd_soc_dai_init_dma_data(dai, &usp->playback_dma_data,
75 &usp->capture_dma_data); 76 &usp->capture_dma_data);
76 return 0; 77 return 0;
@@ -294,6 +295,7 @@ static struct snd_soc_dai_driver sirf_usp_pcm_dai = {
294static int sirf_usp_pcm_runtime_suspend(struct device *dev) 295static int sirf_usp_pcm_runtime_suspend(struct device *dev)
295{ 296{
296 struct sirf_usp *usp = dev_get_drvdata(dev); 297 struct sirf_usp *usp = dev_get_drvdata(dev);
298
297 clk_disable_unprepare(usp->clk); 299 clk_disable_unprepare(usp->clk);
298 return 0; 300 return 0;
299} 301}
@@ -302,6 +304,7 @@ static int sirf_usp_pcm_runtime_resume(struct device *dev)
302{ 304{
303 struct sirf_usp *usp = dev_get_drvdata(dev); 305 struct sirf_usp *usp = dev_get_drvdata(dev);
304 int ret; 306 int ret;
307
305 ret = clk_prepare_enable(usp->clk); 308 ret = clk_prepare_enable(usp->clk);
306 if (ret) { 309 if (ret) {
307 dev_err(dev, "clk_enable failed: %d\n", ret); 310 dev_err(dev, "clk_enable failed: %d\n", ret);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 2722bb0c5573..525f2f397b4c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1776,7 +1776,6 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
1776 } 1776 }
1777 1777
1778 component->init = aux_dev->init; 1778 component->init = aux_dev->init;
1779 component->auxiliary = 1;
1780 list_add(&component->card_aux_list, &card->aux_comp_list); 1779 list_add(&component->card_aux_list, &card->aux_comp_list);
1781 1780
1782 return 0; 1781 return 0;
@@ -1788,14 +1787,13 @@ err_defer:
1788 1787
1789static int soc_probe_aux_devices(struct snd_soc_card *card) 1788static int soc_probe_aux_devices(struct snd_soc_card *card)
1790{ 1789{
1791 struct snd_soc_component *comp, *tmp; 1790 struct snd_soc_component *comp;
1792 int order; 1791 int order;
1793 int ret; 1792 int ret;
1794 1793
1795 for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST; 1794 for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
1796 order++) { 1795 order++) {
1797 list_for_each_entry_safe(comp, tmp, &card->aux_comp_list, 1796 list_for_each_entry(comp, &card->aux_comp_list, card_aux_list) {
1798 card_aux_list) {
1799 if (comp->driver->probe_order == order) { 1797 if (comp->driver->probe_order == order) {
1800 ret = soc_probe_component(card, comp); 1798 ret = soc_probe_component(card, comp);
1801 if (ret < 0) { 1799 if (ret < 0) {
@@ -1804,7 +1802,6 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
1804 comp->name, ret); 1802 comp->name, ret);
1805 return ret; 1803 return ret;
1806 } 1804 }
1807 list_del(&comp->card_aux_list);
1808 } 1805 }
1809 } 1806 }
1810 } 1807 }
@@ -1820,14 +1817,12 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
1820 for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST; 1817 for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
1821 order++) { 1818 order++) {
1822 list_for_each_entry_safe(comp, _comp, 1819 list_for_each_entry_safe(comp, _comp,
1823 &card->component_dev_list, card_list) { 1820 &card->aux_comp_list, card_aux_list) {
1824
1825 if (!comp->auxiliary)
1826 continue;
1827 1821
1828 if (comp->driver->remove_order == order) { 1822 if (comp->driver->remove_order == order) {
1829 soc_remove_component(comp); 1823 soc_remove_component(comp);
1830 comp->auxiliary = 0; 1824 /* remove it from the card's aux_comp_list */
1825 list_del(&comp->card_aux_list);
1831 } 1826 }
1832 } 1827 }
1833 } 1828 }
@@ -1918,6 +1913,7 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
1918EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt); 1913EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
1919 1914
1920 1915
1916#ifdef CONFIG_DMI
1921/* Trim special characters, and replace '-' with '_' since '-' is used to 1917/* Trim special characters, and replace '-' with '_' since '-' is used to
1922 * separate different DMI fields in the card long name. Only number and 1918 * separate different DMI fields in the card long name. Only number and
1923 * alphabet characters and a few separator characters are kept. 1919 * alphabet characters and a few separator characters are kept.
@@ -2049,6 +2045,7 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
2049 return 0; 2045 return 0;
2050} 2046}
2051EXPORT_SYMBOL_GPL(snd_soc_set_dmi_name); 2047EXPORT_SYMBOL_GPL(snd_soc_set_dmi_name);
2048#endif /* CONFIG_DMI */
2052 2049
2053static int snd_soc_instantiate_card(struct snd_soc_card *card) 2050static int snd_soc_instantiate_card(struct snd_soc_card *card)
2054{ 2051{
@@ -2190,6 +2187,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
2190 snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes, 2187 snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
2191 card->num_of_dapm_routes); 2188 card->num_of_dapm_routes);
2192 2189
2190 /* try to set some sane longname if DMI is available */
2191 snd_soc_set_dmi_name(card, NULL);
2192
2193 snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname), 2193 snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
2194 "%s", card->name); 2194 "%s", card->name);
2195 snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), 2195 snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
@@ -3139,7 +3139,7 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
3139 component->suspend = component->driver->suspend; 3139 component->suspend = component->driver->suspend;
3140 component->resume = component->driver->resume; 3140 component->resume = component->driver->resume;
3141 component->pcm_new = component->driver->pcm_new; 3141 component->pcm_new = component->driver->pcm_new;
3142 component->pcm_free= component->driver->pcm_free; 3142 component->pcm_free = component->driver->pcm_free;
3143 3143
3144 dapm = &component->dapm; 3144 dapm = &component->dapm;
3145 dapm->dev = dev; 3145 dapm->dev = dev;
@@ -3240,6 +3240,11 @@ static void snd_soc_component_cleanup(struct snd_soc_component *component)
3240 3240
3241static void snd_soc_component_del_unlocked(struct snd_soc_component *component) 3241static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
3242{ 3242{
3243 struct snd_soc_card *card = component->card;
3244
3245 if (card)
3246 snd_soc_unregister_card(card);
3247
3243 list_del(&component->list); 3248 list_del(&component->list);
3244} 3249}
3245 3250
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index fbaa1bb41102..7daf21fee355 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -19,9 +19,28 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/export.h> 21#include <linux/export.h>
22#include <linux/suspend.h>
22#include <trace/events/asoc.h> 23#include <trace/events/asoc.h>
23 24
24/** 25/**
26 * snd_soc_codec_set_jack - configure codec jack.
27 * @codec: CODEC
28 * @jack: structure to use for the jack
29 * @data: can be used if codec driver need extra data for configuring jack
30 *
31 * Configures and enables jack detection function.
32 */
33int snd_soc_codec_set_jack(struct snd_soc_codec *codec,
34 struct snd_soc_jack *jack, void *data)
35{
36 if (codec->driver->set_jack)
37 return codec->driver->set_jack(codec, jack, data);
38 else
39 return -EINVAL;
40}
41EXPORT_SYMBOL_GPL(snd_soc_codec_set_jack);
42
43/**
25 * snd_soc_card_jack_new - Create a new jack 44 * snd_soc_card_jack_new - Create a new jack
26 * @card: ASoC card 45 * @card: ASoC card
27 * @id: an identifying string for this jack 46 * @id: an identifying string for this jack
@@ -293,6 +312,27 @@ static void gpio_work(struct work_struct *work)
293 snd_soc_jack_gpio_detect(gpio); 312 snd_soc_jack_gpio_detect(gpio);
294} 313}
295 314
315static int snd_soc_jack_pm_notifier(struct notifier_block *nb,
316 unsigned long action, void *data)
317{
318 struct snd_soc_jack_gpio *gpio =
319 container_of(nb, struct snd_soc_jack_gpio, pm_notifier);
320
321 switch (action) {
322 case PM_POST_SUSPEND:
323 case PM_POST_HIBERNATION:
324 case PM_POST_RESTORE:
325 /*
326 * Use workqueue so we do not have to care about running
327 * concurrently with work triggered by the interrupt handler.
328 */
329 queue_delayed_work(system_power_efficient_wq, &gpio->work, 0);
330 break;
331 }
332
333 return NOTIFY_DONE;
334}
335
296/** 336/**
297 * snd_soc_jack_add_gpios - Associate GPIO pins with an ASoC jack 337 * snd_soc_jack_add_gpios - Associate GPIO pins with an ASoC jack
298 * 338 *
@@ -369,6 +409,13 @@ got_gpio:
369 i, ret); 409 i, ret);
370 } 410 }
371 411
412 /*
413 * Register PM notifier so we do not miss state transitions
414 * happening while system is asleep.
415 */
416 gpios[i].pm_notifier.notifier_call = snd_soc_jack_pm_notifier;
417 register_pm_notifier(&gpios[i].pm_notifier);
418
372 /* Expose GPIO value over sysfs for diagnostic purposes */ 419 /* Expose GPIO value over sysfs for diagnostic purposes */
373 gpiod_export(gpios[i].desc, false); 420 gpiod_export(gpios[i].desc, false);
374 421
@@ -428,6 +475,7 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
428 475
429 for (i = 0; i < count; i++) { 476 for (i = 0; i < count; i++) {
430 gpiod_unexport(gpios[i].desc); 477 gpiod_unexport(gpios[i].desc);
478 unregister_pm_notifier(&gpios[i].pm_notifier);
431 free_irq(gpiod_to_irq(gpios[i].desc), &gpios[i]); 479 free_irq(gpiod_to_irq(gpios[i].desc), &gpios[i]);
432 cancel_delayed_work_sync(&gpios[i].work); 480 cancel_delayed_work_sync(&gpios[i].work);
433 gpiod_put(gpios[i].desc); 481 gpiod_put(gpios[i].desc);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 058bc99c6c34..002772e3ba2c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -495,12 +495,13 @@ static void remove_widget(struct snd_soc_component *comp,
495 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 495 struct snd_kcontrol *kcontrol = w->kcontrols[i];
496 struct soc_enum *se = 496 struct soc_enum *se =
497 (struct soc_enum *)kcontrol->private_value; 497 (struct soc_enum *)kcontrol->private_value;
498 int j;
498 499
499 snd_ctl_remove(card, kcontrol); 500 snd_ctl_remove(card, kcontrol);
500 501
501 kfree(se->dobj.control.dvalues); 502 kfree(se->dobj.control.dvalues);
502 for (i = 0; i < se->items; i++) 503 for (j = 0; j < se->items; j++)
503 kfree(se->dobj.control.dtexts[i]); 504 kfree(se->dobj.control.dtexts[j]);
504 505
505 kfree(se); 506 kfree(se);
506 } 507 }
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index d7e8dd46d2cc..d8b6936e544e 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -1074,7 +1074,7 @@ int uni_player_init(struct platform_device *pdev,
1074 player->clk = of_clk_get(pdev->dev.of_node, 0); 1074 player->clk = of_clk_get(pdev->dev.of_node, 0);
1075 if (IS_ERR(player->clk)) { 1075 if (IS_ERR(player->clk)) {
1076 dev_err(player->dev, "Failed to get clock\n"); 1076 dev_err(player->dev, "Failed to get clock\n");
1077 ret = PTR_ERR(player->clk); 1077 return PTR_ERR(player->clk);
1078 } 1078 }
1079 1079
1080 /* Select the frequency synthesizer clock */ 1080 /* Select the frequency synthesizer clock */
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
new file mode 100644
index 000000000000..972970f0890a
--- /dev/null
+++ b/sound/soc/stm/Kconfig
@@ -0,0 +1,8 @@
1menuconfig SND_SOC_STM32
2 tristate "STMicroelectronics STM32 SOC audio support"
3 depends on ARCH_STM32 || COMPILE_TEST
4 depends on SND_SOC
5 select SND_SOC_GENERIC_DMAENGINE_PCM
6 select REGMAP_MMIO
7 help
8 Say Y if you want to enable ASoC-support for STM32
diff --git a/sound/soc/stm/Makefile b/sound/soc/stm/Makefile
new file mode 100644
index 000000000000..e466a4759698
--- /dev/null
+++ b/sound/soc/stm/Makefile
@@ -0,0 +1,6 @@
1# SAI
2snd-soc-stm32-sai-sub-objs := stm32_sai_sub.o
3obj-$(CONFIG_SND_SOC_STM32) += snd-soc-stm32-sai-sub.o
4
5snd-soc-stm32-sai-objs := stm32_sai.o
6obj-$(CONFIG_SND_SOC_STM32) += snd-soc-stm32-sai.o
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
new file mode 100644
index 000000000000..2a27a26bf7a1
--- /dev/null
+++ b/sound/soc/stm/stm32_sai.c
@@ -0,0 +1,115 @@
1/*
2 * STM32 ALSA SoC Digital Audio Interface (SAI) driver.
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author(s): Olivier Moysan <olivier.moysan@st.com> for STMicroelectronics.
6 *
7 * License terms: GPL V2.0.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 */
18
19#include <linux/clk.h>
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/of_platform.h>
23#include <linux/reset.h>
24
25#include <sound/dmaengine_pcm.h>
26#include <sound/core.h>
27
28#include "stm32_sai.h"
29
30static const struct of_device_id stm32_sai_ids[] = {
31 { .compatible = "st,stm32f4-sai", .data = (void *)SAI_STM32F4 },
32 {}
33};
34
35static int stm32_sai_probe(struct platform_device *pdev)
36{
37 struct device_node *np = pdev->dev.of_node;
38 struct stm32_sai_data *sai;
39 struct reset_control *rst;
40 struct resource *res;
41 void __iomem *base;
42 const struct of_device_id *of_id;
43
44 sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
45 if (!sai)
46 return -ENOMEM;
47
48 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
49 base = devm_ioremap_resource(&pdev->dev, res);
50 if (IS_ERR(base))
51 return PTR_ERR(base);
52
53 of_id = of_match_device(stm32_sai_ids, &pdev->dev);
54 if (of_id)
55 sai->version = (enum stm32_sai_version)of_id->data;
56 else
57 return -EINVAL;
58
59 sai->clk_x8k = devm_clk_get(&pdev->dev, "x8k");
60 if (IS_ERR(sai->clk_x8k)) {
61 dev_err(&pdev->dev, "missing x8k parent clock\n");
62 return PTR_ERR(sai->clk_x8k);
63 }
64
65 sai->clk_x11k = devm_clk_get(&pdev->dev, "x11k");
66 if (IS_ERR(sai->clk_x11k)) {
67 dev_err(&pdev->dev, "missing x11k parent clock\n");
68 return PTR_ERR(sai->clk_x11k);
69 }
70
71 /* init irqs */
72 sai->irq = platform_get_irq(pdev, 0);
73 if (sai->irq < 0) {
74 dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
75 return sai->irq;
76 }
77
78 /* reset */
79 rst = reset_control_get(&pdev->dev, NULL);
80 if (!IS_ERR(rst)) {
81 reset_control_assert(rst);
82 udelay(2);
83 reset_control_deassert(rst);
84 }
85
86 sai->pdev = pdev;
87 platform_set_drvdata(pdev, sai);
88
89 return of_platform_populate(np, NULL, NULL, &pdev->dev);
90}
91
92static int stm32_sai_remove(struct platform_device *pdev)
93{
94 of_platform_depopulate(&pdev->dev);
95
96 return 0;
97}
98
99MODULE_DEVICE_TABLE(of, stm32_sai_ids);
100
101static struct platform_driver stm32_sai_driver = {
102 .driver = {
103 .name = "st,stm32-sai",
104 .of_match_table = stm32_sai_ids,
105 },
106 .probe = stm32_sai_probe,
107 .remove = stm32_sai_remove,
108};
109
110module_platform_driver(stm32_sai_driver);
111
112MODULE_DESCRIPTION("STM32 Soc SAI Interface");
113MODULE_AUTHOR("Olivier Moysan, <olivier.moysan@st.com>");
114MODULE_ALIAS("platform:st,stm32-sai");
115MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/stm/stm32_sai.h b/sound/soc/stm/stm32_sai.h
new file mode 100644
index 000000000000..a801fda5066f
--- /dev/null
+++ b/sound/soc/stm/stm32_sai.h
@@ -0,0 +1,200 @@
1/*
2 * STM32 ALSA SoC Digital Audio Interface (SAI) driver.
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author(s): Olivier Moysan <olivier.moysan@st.com> for STMicroelectronics.
6 *
7 * License terms: GPL V2.0.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 */
18
19/******************** SAI Register Map **************************************/
20
21/* common register */
22#define STM_SAI_GCR 0x00
23
24/* Sub-block A&B registers offsets, relative to A&B sub-block addresses */
25#define STM_SAI_CR1_REGX 0x00 /* A offset: 0x04. B offset: 0x24 */
26#define STM_SAI_CR2_REGX 0x04
27#define STM_SAI_FRCR_REGX 0x08
28#define STM_SAI_SLOTR_REGX 0x0C
29#define STM_SAI_IMR_REGX 0x10
30#define STM_SAI_SR_REGX 0x14
31#define STM_SAI_CLRFR_REGX 0x18
32#define STM_SAI_DR_REGX 0x1C
33
34/******************** Bit definition for SAI_GCR register *******************/
35#define SAI_GCR_SYNCIN_SHIFT 0
36#define SAI_GCR_SYNCIN_MASK GENMASK(1, SAI_GCR_SYNCIN_SHIFT)
37#define SAI_GCR_SYNCIN_SET(x) ((x) << SAI_GCR_SYNCIN_SHIFT)
38
39#define SAI_GCR_SYNCOUT_SHIFT 4
40#define SAI_GCR_SYNCOUT_MASK GENMASK(5, SAI_GCR_SYNCOUT_SHIFT)
41#define SAI_GCR_SYNCOUT_SET(x) ((x) << SAI_GCR_SYNCOUT_SHIFT)
42
43/******************* Bit definition for SAI_XCR1 register *******************/
44#define SAI_XCR1_RX_TX_SHIFT 0
45#define SAI_XCR1_RX_TX BIT(SAI_XCR1_RX_TX_SHIFT)
46#define SAI_XCR1_SLAVE_SHIFT 1
47#define SAI_XCR1_SLAVE BIT(SAI_XCR1_SLAVE_SHIFT)
48
49#define SAI_XCR1_PRTCFG_SHIFT 2
50#define SAI_XCR1_PRTCFG_MASK GENMASK(3, SAI_XCR1_PRTCFG_SHIFT)
51#define SAI_XCR1_PRTCFG_SET(x) ((x) << SAI_XCR1_PRTCFG_SHIFT)
52
53#define SAI_XCR1_DS_SHIFT 5
54#define SAI_XCR1_DS_MASK GENMASK(7, SAI_XCR1_DS_SHIFT)
55#define SAI_XCR1_DS_SET(x) ((x) << SAI_XCR1_DS_SHIFT)
56
57#define SAI_XCR1_LSBFIRST_SHIFT 8
58#define SAI_XCR1_LSBFIRST BIT(SAI_XCR1_LSBFIRST_SHIFT)
59#define SAI_XCR1_CKSTR_SHIFT 9
60#define SAI_XCR1_CKSTR BIT(SAI_XCR1_CKSTR_SHIFT)
61
62#define SAI_XCR1_SYNCEN_SHIFT 10
63#define SAI_XCR1_SYNCEN_MASK GENMASK(11, SAI_XCR1_SYNCEN_SHIFT)
64#define SAI_XCR1_SYNCEN_SET(x) ((x) << SAI_XCR1_SYNCEN_SHIFT)
65
66#define SAI_XCR1_MONO_SHIFT 12
67#define SAI_XCR1_MONO BIT(SAI_XCR1_MONO_SHIFT)
68#define SAI_XCR1_OUTDRIV_SHIFT 13
69#define SAI_XCR1_OUTDRIV BIT(SAI_XCR1_OUTDRIV_SHIFT)
70#define SAI_XCR1_SAIEN_SHIFT 16
71#define SAI_XCR1_SAIEN BIT(SAI_XCR1_SAIEN_SHIFT)
72#define SAI_XCR1_DMAEN_SHIFT 17
73#define SAI_XCR1_DMAEN BIT(SAI_XCR1_DMAEN_SHIFT)
74#define SAI_XCR1_NODIV_SHIFT 19
75#define SAI_XCR1_NODIV BIT(SAI_XCR1_NODIV_SHIFT)
76
77#define SAI_XCR1_MCKDIV_SHIFT 20
78#define SAI_XCR1_MCKDIV_WIDTH 4
79#define SAI_XCR1_MCKDIV_MASK GENMASK(24, SAI_XCR1_MCKDIV_SHIFT)
80#define SAI_XCR1_MCKDIV_SET(x) ((x) << SAI_XCR1_MCKDIV_SHIFT)
81#define SAI_XCR1_MCKDIV_MAX ((1 << SAI_XCR1_MCKDIV_WIDTH) - 1)
82
83#define SAI_XCR1_OSR_SHIFT 26
84#define SAI_XCR1_OSR BIT(SAI_XCR1_OSR_SHIFT)
85
86/******************* Bit definition for SAI_XCR2 register *******************/
87#define SAI_XCR2_FTH_SHIFT 0
88#define SAI_XCR2_FTH_MASK GENMASK(2, SAI_XCR2_FTH_SHIFT)
89#define SAI_XCR2_FTH_SET(x) ((x) << SAI_XCR2_FTH_SHIFT)
90
91#define SAI_XCR2_FFLUSH_SHIFT 3
92#define SAI_XCR2_FFLUSH BIT(SAI_XCR2_FFLUSH_SHIFT)
93#define SAI_XCR2_TRIS_SHIFT 4
94#define SAI_XCR2_TRIS BIT(SAI_XCR2_TRIS_SHIFT)
95#define SAI_XCR2_MUTE_SHIFT 5
96#define SAI_XCR2_MUTE BIT(SAI_XCR2_MUTE_SHIFT)
97#define SAI_XCR2_MUTEVAL_SHIFT 6
98#define SAI_XCR2_MUTEVAL BIT(SAI_XCR2_MUTEVAL_SHIFT)
99
100#define SAI_XCR2_MUTECNT_SHIFT 7
101#define SAI_XCR2_MUTECNT_MASK GENMASK(12, SAI_XCR2_MUTECNT_SHIFT)
102#define SAI_XCR2_MUTECNT_SET(x) ((x) << SAI_XCR2_MUTECNT_SHIFT)
103
104#define SAI_XCR2_CPL_SHIFT 13
105#define SAI_XCR2_CPL BIT(SAI_XCR2_CPL_SHIFT)
106
107#define SAI_XCR2_COMP_SHIFT 14
108#define SAI_XCR2_COMP_MASK GENMASK(15, SAI_XCR2_COMP_SHIFT)
109#define SAI_XCR2_COMP_SET(x) ((x) << SAI_XCR2_COMP_SHIFT)
110
111/****************** Bit definition for SAI_XFRCR register *******************/
112#define SAI_XFRCR_FRL_SHIFT 0
113#define SAI_XFRCR_FRL_MASK GENMASK(7, SAI_XFRCR_FRL_SHIFT)
114#define SAI_XFRCR_FRL_SET(x) ((x) << SAI_XFRCR_FRL_SHIFT)
115
116#define SAI_XFRCR_FSALL_SHIFT 8
117#define SAI_XFRCR_FSALL_MASK GENMASK(14, SAI_XFRCR_FSALL_SHIFT)
118#define SAI_XFRCR_FSALL_SET(x) ((x) << SAI_XFRCR_FSALL_SHIFT)
119
120#define SAI_XFRCR_FSDEF_SHIFT 16
121#define SAI_XFRCR_FSDEF BIT(SAI_XFRCR_FSDEF_SHIFT)
122#define SAI_XFRCR_FSPOL_SHIFT 17
123#define SAI_XFRCR_FSPOL BIT(SAI_XFRCR_FSPOL_SHIFT)
124#define SAI_XFRCR_FSOFF_SHIFT 18
125#define SAI_XFRCR_FSOFF BIT(SAI_XFRCR_FSOFF_SHIFT)
126
127/****************** Bit definition for SAI_XSLOTR register ******************/
128
129#define SAI_XSLOTR_FBOFF_SHIFT 0
130#define SAI_XSLOTR_FBOFF_MASK GENMASK(4, SAI_XSLOTR_FBOFF_SHIFT)
131#define SAI_XSLOTR_FBOFF_SET(x) ((x) << SAI_XSLOTR_FBOFF_SHIFT)
132
133#define SAI_XSLOTR_SLOTSZ_SHIFT 6
134#define SAI_XSLOTR_SLOTSZ_MASK GENMASK(7, SAI_XSLOTR_SLOTSZ_SHIFT)
135#define SAI_XSLOTR_SLOTSZ_SET(x) ((x) << SAI_XSLOTR_SLOTSZ_SHIFT)
136
137#define SAI_XSLOTR_NBSLOT_SHIFT 8
138#define SAI_XSLOTR_NBSLOT_MASK GENMASK(11, SAI_XSLOTR_NBSLOT_SHIFT)
139#define SAI_XSLOTR_NBSLOT_SET(x) ((x) << SAI_XSLOTR_NBSLOT_SHIFT)
140
141#define SAI_XSLOTR_SLOTEN_SHIFT 16
142#define SAI_XSLOTR_SLOTEN_WIDTH 16
143#define SAI_XSLOTR_SLOTEN_MASK GENMASK(31, SAI_XSLOTR_SLOTEN_SHIFT)
144#define SAI_XSLOTR_SLOTEN_SET(x) ((x) << SAI_XSLOTR_SLOTEN_SHIFT)
145
146/******************* Bit definition for SAI_XIMR register *******************/
147#define SAI_XIMR_OVRUDRIE BIT(0)
148#define SAI_XIMR_MUTEDETIE BIT(1)
149#define SAI_XIMR_WCKCFGIE BIT(2)
150#define SAI_XIMR_FREQIE BIT(3)
151#define SAI_XIMR_CNRDYIE BIT(4)
152#define SAI_XIMR_AFSDETIE BIT(5)
153#define SAI_XIMR_LFSDETIE BIT(6)
154
155#define SAI_XIMR_SHIFT 0
156#define SAI_XIMR_MASK GENMASK(6, SAI_XIMR_SHIFT)
157
158/******************** Bit definition for SAI_XSR register *******************/
159#define SAI_XSR_OVRUDR BIT(0)
160#define SAI_XSR_MUTEDET BIT(1)
161#define SAI_XSR_WCKCFG BIT(2)
162#define SAI_XSR_FREQ BIT(3)
163#define SAI_XSR_CNRDY BIT(4)
164#define SAI_XSR_AFSDET BIT(5)
165#define SAI_XSR_LFSDET BIT(6)
166
167#define SAI_XSR_SHIFT 0
168#define SAI_XSR_MASK GENMASK(6, SAI_XSR_SHIFT)
169
170/****************** Bit definition for SAI_XCLRFR register ******************/
171#define SAI_XCLRFR_COVRUDR BIT(0)
172#define SAI_XCLRFR_CMUTEDET BIT(1)
173#define SAI_XCLRFR_CWCKCFG BIT(2)
174#define SAI_XCLRFR_CFREQ BIT(3)
175#define SAI_XCLRFR_CCNRDY BIT(4)
176#define SAI_XCLRFR_CAFSDET BIT(5)
177#define SAI_XCLRFR_CLFSDET BIT(6)
178
179#define SAI_XCLRFR_SHIFT 0
180#define SAI_XCLRFR_MASK GENMASK(6, SAI_XCLRFR_SHIFT)
181
182enum stm32_sai_version {
183 SAI_STM32F4
184};
185
186/**
187 * struct stm32_sai_data - private data of SAI instance driver
188 * @pdev: device data pointer
189 * @clk_x8k: SAI parent clock for sampling frequencies multiple of 8kHz
190 * @clk_x11k: SAI parent clock for sampling frequencies multiple of 11kHz
191 * @version: SOC version
192 * @irq: SAI interrupt line
193 */
194struct stm32_sai_data {
195 struct platform_device *pdev;
196 struct clk *clk_x8k;
197 struct clk *clk_x11k;
198 int version;
199 int irq;
200};
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
new file mode 100644
index 000000000000..ae4706ca265b
--- /dev/null
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -0,0 +1,884 @@
1/*
2 * STM32 ALSA SoC Digital Audio Interface (SAI) driver.
3 *
4 * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
5 * Author(s): Olivier Moysan <olivier.moysan@st.com> for STMicroelectronics.
6 *
7 * License terms: GPL V2.0.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 */
18
19#include <linux/clk.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of_irq.h>
23#include <linux/of_platform.h>
24#include <linux/regmap.h>
25
26#include <sound/core.h>
27#include <sound/dmaengine_pcm.h>
28#include <sound/pcm_params.h>
29
30#include "stm32_sai.h"
31
32#define SAI_FREE_PROTOCOL 0x0
33
34#define SAI_SLOT_SIZE_AUTO 0x0
35#define SAI_SLOT_SIZE_16 0x1
36#define SAI_SLOT_SIZE_32 0x2
37
38#define SAI_DATASIZE_8 0x2
39#define SAI_DATASIZE_10 0x3
40#define SAI_DATASIZE_16 0x4
41#define SAI_DATASIZE_20 0x5
42#define SAI_DATASIZE_24 0x6
43#define SAI_DATASIZE_32 0x7
44
45#define STM_SAI_FIFO_SIZE 8
46#define STM_SAI_DAI_NAME_SIZE 15
47
48#define STM_SAI_IS_PLAYBACK(ip) ((ip)->dir == SNDRV_PCM_STREAM_PLAYBACK)
49#define STM_SAI_IS_CAPTURE(ip) ((ip)->dir == SNDRV_PCM_STREAM_CAPTURE)
50
51#define STM_SAI_A_ID 0x0
52#define STM_SAI_B_ID 0x1
53
54#define STM_SAI_BLOCK_NAME(x) (((x)->id == STM_SAI_A_ID) ? "A" : "B")
55
56/**
57 * struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
58 * @pdev: device data pointer
59 * @regmap: SAI register map pointer
60 * @dma_params: dma configuration data for rx or tx channel
61 * @cpu_dai_drv: DAI driver data pointer
62 * @cpu_dai: DAI runtime data pointer
63 * @substream: PCM substream data pointer
64 * @pdata: SAI block parent data pointer
65 * @sai_ck: kernel clock feeding the SAI clock generator
66 * @phys_addr: SAI registers physical base address
67 * @mclk_rate: SAI block master clock frequency (Hz). set at init
68 * @id: SAI sub block id corresponding to sub-block A or B
69 * @dir: SAI block direction (playback or capture). set at init
70 * @master: SAI block mode flag. (true=master, false=slave) set at init
71 * @fmt: SAI block format. relevant only for custom protocols. set at init
72 * @sync: SAI block synchronization mode. (none, internal or external)
73 * @fs_length: frame synchronization length. depends on protocol settings
74 * @slots: rx or tx slot number
75 * @slot_width: rx or tx slot width in bits
76 * @slot_mask: rx or tx active slots mask. set at init or at runtime
77 * @data_size: PCM data width. corresponds to PCM substream width.
78 */
79struct stm32_sai_sub_data {
80 struct platform_device *pdev;
81 struct regmap *regmap;
82 struct snd_dmaengine_dai_dma_data dma_params;
83 struct snd_soc_dai_driver *cpu_dai_drv;
84 struct snd_soc_dai *cpu_dai;
85 struct snd_pcm_substream *substream;
86 struct stm32_sai_data *pdata;
87 struct clk *sai_ck;
88 dma_addr_t phys_addr;
89 unsigned int mclk_rate;
90 unsigned int id;
91 int dir;
92 bool master;
93 int fmt;
94 int sync;
95 int fs_length;
96 int slots;
97 int slot_width;
98 int slot_mask;
99 int data_size;
100};
101
102enum stm32_sai_fifo_th {
103 STM_SAI_FIFO_TH_EMPTY,
104 STM_SAI_FIFO_TH_QUARTER,
105 STM_SAI_FIFO_TH_HALF,
106 STM_SAI_FIFO_TH_3_QUARTER,
107 STM_SAI_FIFO_TH_FULL,
108};
109
110static bool stm32_sai_sub_readable_reg(struct device *dev, unsigned int reg)
111{
112 switch (reg) {
113 case STM_SAI_CR1_REGX:
114 case STM_SAI_CR2_REGX:
115 case STM_SAI_FRCR_REGX:
116 case STM_SAI_SLOTR_REGX:
117 case STM_SAI_IMR_REGX:
118 case STM_SAI_SR_REGX:
119 case STM_SAI_CLRFR_REGX:
120 case STM_SAI_DR_REGX:
121 return true;
122 default:
123 return false;
124 }
125}
126
127static bool stm32_sai_sub_volatile_reg(struct device *dev, unsigned int reg)
128{
129 switch (reg) {
130 case STM_SAI_DR_REGX:
131 return true;
132 default:
133 return false;
134 }
135}
136
137static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg)
138{
139 switch (reg) {
140 case STM_SAI_CR1_REGX:
141 case STM_SAI_CR2_REGX:
142 case STM_SAI_FRCR_REGX:
143 case STM_SAI_SLOTR_REGX:
144 case STM_SAI_IMR_REGX:
145 case STM_SAI_SR_REGX:
146 case STM_SAI_CLRFR_REGX:
147 case STM_SAI_DR_REGX:
148 return true;
149 default:
150 return false;
151 }
152}
153
154static const struct regmap_config stm32_sai_sub_regmap_config = {
155 .reg_bits = 32,
156 .reg_stride = 4,
157 .val_bits = 32,
158 .max_register = STM_SAI_DR_REGX,
159 .readable_reg = stm32_sai_sub_readable_reg,
160 .volatile_reg = stm32_sai_sub_volatile_reg,
161 .writeable_reg = stm32_sai_sub_writeable_reg,
162 .fast_io = true,
163};
164
165static irqreturn_t stm32_sai_isr(int irq, void *devid)
166{
167 struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
168 struct snd_pcm_substream *substream = sai->substream;
169 struct platform_device *pdev = sai->pdev;
170 unsigned int sr, imr, flags;
171 snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING;
172
173 regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr);
174 regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr);
175
176 flags = sr & imr;
177 if (!flags)
178 return IRQ_NONE;
179
180 regmap_update_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
181 SAI_XCLRFR_MASK);
182
183 if (flags & SAI_XIMR_OVRUDRIE) {
184 dev_err(&pdev->dev, "IT %s\n",
185 STM_SAI_IS_PLAYBACK(sai) ? "underrun" : "overrun");
186 status = SNDRV_PCM_STATE_XRUN;
187 }
188
189 if (flags & SAI_XIMR_MUTEDETIE)
190 dev_dbg(&pdev->dev, "IT mute detected\n");
191
192 if (flags & SAI_XIMR_WCKCFGIE) {
193 dev_err(&pdev->dev, "IT wrong clock configuration\n");
194 status = SNDRV_PCM_STATE_DISCONNECTED;
195 }
196
197 if (flags & SAI_XIMR_CNRDYIE)
198 dev_warn(&pdev->dev, "IT Codec not ready\n");
199
200 if (flags & SAI_XIMR_AFSDETIE) {
201 dev_warn(&pdev->dev, "IT Anticipated frame synchro\n");
202 status = SNDRV_PCM_STATE_XRUN;
203 }
204
205 if (flags & SAI_XIMR_LFSDETIE) {
206 dev_warn(&pdev->dev, "IT Late frame synchro\n");
207 status = SNDRV_PCM_STATE_XRUN;
208 }
209
210 if (status != SNDRV_PCM_STATE_RUNNING) {
211 snd_pcm_stream_lock(substream);
212 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
213 snd_pcm_stream_unlock(substream);
214 }
215
216 return IRQ_HANDLED;
217}
218
219static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
220 int clk_id, unsigned int freq, int dir)
221{
222 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
223
224 if ((dir == SND_SOC_CLOCK_OUT) && sai->master) {
225 sai->mclk_rate = freq;
226 dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
227 }
228
229 return 0;
230}
231
232static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
233 u32 rx_mask, int slots, int slot_width)
234{
235 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
236 int slotr, slotr_mask, slot_size;
237
238 dev_dbg(cpu_dai->dev, "masks tx/rx:%#x/%#x, slots:%d, width:%d\n",
239 tx_mask, rx_mask, slots, slot_width);
240
241 switch (slot_width) {
242 case 16:
243 slot_size = SAI_SLOT_SIZE_16;
244 break;
245 case 32:
246 slot_size = SAI_SLOT_SIZE_32;
247 break;
248 default:
249 slot_size = SAI_SLOT_SIZE_AUTO;
250 break;
251 }
252
253 slotr = SAI_XSLOTR_SLOTSZ_SET(slot_size) |
254 SAI_XSLOTR_NBSLOT_SET(slots - 1);
255 slotr_mask = SAI_XSLOTR_SLOTSZ_MASK | SAI_XSLOTR_NBSLOT_MASK;
256
257 /* tx/rx mask set in machine init, if slot number defined in DT */
258 if (STM_SAI_IS_PLAYBACK(sai)) {
259 sai->slot_mask = tx_mask;
260 slotr |= SAI_XSLOTR_SLOTEN_SET(tx_mask);
261 }
262
263 if (STM_SAI_IS_CAPTURE(sai)) {
264 sai->slot_mask = rx_mask;
265 slotr |= SAI_XSLOTR_SLOTEN_SET(rx_mask);
266 }
267
268 slotr_mask |= SAI_XSLOTR_SLOTEN_MASK;
269
270 regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
271
272 sai->slot_width = slot_width;
273 sai->slots = slots;
274
275 return 0;
276}
277
278static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
279{
280 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
281 int cr1 = 0, frcr = 0;
282 int cr1_mask = 0, frcr_mask = 0;
283 int ret;
284
285 dev_dbg(cpu_dai->dev, "fmt %x\n", fmt);
286
287 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
288 /* SCK active high for all protocols */
289 case SND_SOC_DAIFMT_I2S:
290 cr1 |= SAI_XCR1_CKSTR;
291 frcr |= SAI_XFRCR_FSOFF | SAI_XFRCR_FSDEF;
292 break;
293 /* Left justified */
294 case SND_SOC_DAIFMT_MSB:
295 frcr |= SAI_XFRCR_FSPOL | SAI_XFRCR_FSDEF;
296 break;
297 /* Right justified */
298 case SND_SOC_DAIFMT_LSB:
299 frcr |= SAI_XFRCR_FSPOL | SAI_XFRCR_FSDEF;
300 break;
301 case SND_SOC_DAIFMT_DSP_A:
302 frcr |= SAI_XFRCR_FSPOL | SAI_XFRCR_FSOFF;
303 break;
304 case SND_SOC_DAIFMT_DSP_B:
305 frcr |= SAI_XFRCR_FSPOL;
306 break;
307 default:
308 dev_err(cpu_dai->dev, "Unsupported protocol %#x\n",
309 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
310 return -EINVAL;
311 }
312
313 cr1_mask |= SAI_XCR1_PRTCFG_MASK | SAI_XCR1_CKSTR;
314 frcr_mask |= SAI_XFRCR_FSPOL | SAI_XFRCR_FSOFF |
315 SAI_XFRCR_FSDEF;
316
317 /* DAI clock strobing. Invert setting previously set */
318 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
319 case SND_SOC_DAIFMT_NB_NF:
320 break;
321 case SND_SOC_DAIFMT_IB_NF:
322 cr1 ^= SAI_XCR1_CKSTR;
323 break;
324 case SND_SOC_DAIFMT_NB_IF:
325 frcr ^= SAI_XFRCR_FSPOL;
326 break;
327 case SND_SOC_DAIFMT_IB_IF:
328 /* Invert fs & sck */
329 cr1 ^= SAI_XCR1_CKSTR;
330 frcr ^= SAI_XFRCR_FSPOL;
331 break;
332 default:
333 dev_err(cpu_dai->dev, "Unsupported strobing %#x\n",
334 fmt & SND_SOC_DAIFMT_INV_MASK);
335 return -EINVAL;
336 }
337 cr1_mask |= SAI_XCR1_CKSTR;
338 frcr_mask |= SAI_XFRCR_FSPOL;
339
340 regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
341
342 /* DAI clock master masks */
343 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
344 case SND_SOC_DAIFMT_CBM_CFM:
345 /* codec is master */
346 cr1 |= SAI_XCR1_SLAVE;
347 sai->master = false;
348 break;
349 case SND_SOC_DAIFMT_CBS_CFS:
350 sai->master = true;
351 break;
352 default:
353 dev_err(cpu_dai->dev, "Unsupported mode %#x\n",
354 fmt & SND_SOC_DAIFMT_MASTER_MASK);
355 return -EINVAL;
356 }
357 cr1_mask |= SAI_XCR1_SLAVE;
358
359 ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
360 if (ret < 0) {
361 dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
362 return ret;
363 }
364
365 sai->fmt = fmt;
366
367 return 0;
368}
369
370static int stm32_sai_startup(struct snd_pcm_substream *substream,
371 struct snd_soc_dai *cpu_dai)
372{
373 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
374 int imr, cr2, ret;
375
376 sai->substream = substream;
377
378 ret = clk_prepare_enable(sai->sai_ck);
379 if (ret < 0) {
380 dev_err(cpu_dai->dev, "failed to enable clock: %d\n", ret);
381 return ret;
382 }
383
384 /* Enable ITs */
385 regmap_update_bits(sai->regmap, STM_SAI_SR_REGX,
386 SAI_XSR_MASK, (unsigned int)~SAI_XSR_MASK);
387
388 regmap_update_bits(sai->regmap, STM_SAI_CLRFR_REGX,
389 SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
390
391 imr = SAI_XIMR_OVRUDRIE;
392 if (STM_SAI_IS_CAPTURE(sai)) {
393 regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2);
394 if (cr2 & SAI_XCR2_MUTECNT_MASK)
395 imr |= SAI_XIMR_MUTEDETIE;
396 }
397
398 if (sai->master)
399 imr |= SAI_XIMR_WCKCFGIE;
400 else
401 imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE;
402
403 regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
404 SAI_XIMR_MASK, imr);
405
406 return 0;
407}
408
409static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
410 struct snd_pcm_substream *substream,
411 struct snd_pcm_hw_params *params)
412{
413 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
414 int cr1, cr1_mask, ret;
415 int fth = STM_SAI_FIFO_TH_HALF;
416
417 /* FIFO config */
418 regmap_update_bits(sai->regmap, STM_SAI_CR2_REGX,
419 SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
420 SAI_XCR2_FFLUSH | SAI_XCR2_FTH_SET(fth));
421
422 /* Mode, data format and channel config */
423 cr1 = SAI_XCR1_PRTCFG_SET(SAI_FREE_PROTOCOL);
424 switch (params_format(params)) {
425 case SNDRV_PCM_FORMAT_S8:
426 cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_8);
427 break;
428 case SNDRV_PCM_FORMAT_S16_LE:
429 cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_16);
430 break;
431 case SNDRV_PCM_FORMAT_S32_LE:
432 cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_32);
433 break;
434 default:
435 dev_err(cpu_dai->dev, "Data format not supported");
436 return -EINVAL;
437 }
438 cr1_mask = SAI_XCR1_DS_MASK | SAI_XCR1_PRTCFG_MASK;
439
440 cr1_mask |= SAI_XCR1_RX_TX;
441 if (STM_SAI_IS_CAPTURE(sai))
442 cr1 |= SAI_XCR1_RX_TX;
443
444 cr1_mask |= SAI_XCR1_MONO;
445 if ((sai->slots == 2) && (params_channels(params) == 1))
446 cr1 |= SAI_XCR1_MONO;
447
448 ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
449 if (ret < 0) {
450 dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
451 return ret;
452 }
453
454 /* DMA config */
455 sai->dma_params.maxburst = STM_SAI_FIFO_SIZE * fth / sizeof(u32);
456 snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)&sai->dma_params);
457
458 return 0;
459}
460
461static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
462{
463 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
464 int slotr, slot_sz;
465
466 regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr);
467
468 /*
469 * If SLOTSZ is set to auto in SLOTR, align slot width on data size
470 * By default slot width = data size, if not forced from DT
471 */
472 slot_sz = slotr & SAI_XSLOTR_SLOTSZ_MASK;
473 if (slot_sz == SAI_XSLOTR_SLOTSZ_SET(SAI_SLOT_SIZE_AUTO))
474 sai->slot_width = sai->data_size;
475
476 if (sai->slot_width < sai->data_size) {
477 dev_err(cpu_dai->dev,
478 "Data size %d larger than slot width\n",
479 sai->data_size);
480 return -EINVAL;
481 }
482
483 /* Slot number is set to 2, if not specified in DT */
484 if (!sai->slots)
485 sai->slots = 2;
486
487 /* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1*/
488 regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
489 SAI_XSLOTR_NBSLOT_MASK,
490 SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
491
492 /* Set default slots mask if not already set from DT */
493 if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) {
494 sai->slot_mask = (1 << sai->slots) - 1;
495 regmap_update_bits(sai->regmap,
496 STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
497 SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
498 }
499
500 dev_dbg(cpu_dai->dev, "slots %d, slot width %d\n",
501 sai->slots, sai->slot_width);
502
503 return 0;
504}
505
506static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai)
507{
508 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
509 int fs_active, offset, format;
510 int frcr, frcr_mask;
511
512 format = sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
513 sai->fs_length = sai->slot_width * sai->slots;
514
515 fs_active = sai->fs_length / 2;
516 if ((format == SND_SOC_DAIFMT_DSP_A) ||
517 (format == SND_SOC_DAIFMT_DSP_B))
518 fs_active = 1;
519
520 frcr = SAI_XFRCR_FRL_SET((sai->fs_length - 1));
521 frcr |= SAI_XFRCR_FSALL_SET((fs_active - 1));
522 frcr_mask = SAI_XFRCR_FRL_MASK | SAI_XFRCR_FSALL_MASK;
523
524 dev_dbg(cpu_dai->dev, "frame length %d, frame active %d\n",
525 sai->fs_length, fs_active);
526
527 regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
528
529 if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) {
530 offset = sai->slot_width - sai->data_size;
531
532 regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
533 SAI_XSLOTR_FBOFF_MASK,
534 SAI_XSLOTR_FBOFF_SET(offset));
535 }
536}
537
538static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
539 struct snd_pcm_hw_params *params)
540{
541 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
542 int cr1, mask, div = 0;
543 int sai_clk_rate, ret;
544
545 if (!sai->mclk_rate) {
546 dev_err(cpu_dai->dev, "Mclk rate is null\n");
547 return -EINVAL;
548 }
549
550 if (!(params_rate(params) % 11025))
551 clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
552 else
553 clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
554 sai_clk_rate = clk_get_rate(sai->sai_ck);
555
556 /*
557 * mclk_rate = 256 * fs
558 * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
559 * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
560 */
561 if (2 * sai_clk_rate >= 3 * sai->mclk_rate)
562 div = DIV_ROUND_CLOSEST(sai_clk_rate, 2 * sai->mclk_rate);
563
564 if (div > SAI_XCR1_MCKDIV_MAX) {
565 dev_err(cpu_dai->dev, "Divider %d out of range\n", div);
566 return -EINVAL;
567 }
568 dev_dbg(cpu_dai->dev, "SAI clock %d, divider %d\n", sai_clk_rate, div);
569
570 mask = SAI_XCR1_MCKDIV_MASK;
571 cr1 = SAI_XCR1_MCKDIV_SET(div);
572 ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
573 if (ret < 0) {
574 dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
575 return ret;
576 }
577
578 return 0;
579}
580
581static int stm32_sai_hw_params(struct snd_pcm_substream *substream,
582 struct snd_pcm_hw_params *params,
583 struct snd_soc_dai *cpu_dai)
584{
585 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
586 int ret;
587
588 sai->data_size = params_width(params);
589
590 ret = stm32_sai_set_slots(cpu_dai);
591 if (ret < 0)
592 return ret;
593 stm32_sai_set_frame(cpu_dai);
594
595 ret = stm32_sai_set_config(cpu_dai, substream, params);
596 if (ret)
597 return ret;
598
599 if (sai->master)
600 ret = stm32_sai_configure_clock(cpu_dai, params);
601
602 return ret;
603}
604
605static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
606 struct snd_soc_dai *cpu_dai)
607{
608 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
609 int ret;
610
611 switch (cmd) {
612 case SNDRV_PCM_TRIGGER_START:
613 case SNDRV_PCM_TRIGGER_RESUME:
614 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
615 dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n");
616
617 regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
618 SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
619
620 /* Enable SAI */
621 ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
622 SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
623 if (ret < 0)
624 dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
625 break;
626 case SNDRV_PCM_TRIGGER_SUSPEND:
627 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
628 case SNDRV_PCM_TRIGGER_STOP:
629 dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
630
631 regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
632 SAI_XCR1_DMAEN,
633 (unsigned int)~SAI_XCR1_DMAEN);
634
635 ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
636 SAI_XCR1_SAIEN,
637 (unsigned int)~SAI_XCR1_SAIEN);
638 if (ret < 0)
639 dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
640 break;
641 default:
642 return -EINVAL;
643 }
644
645 return ret;
646}
647
/*
 * DAI shutdown: mask all SAI interrupts, release the kernel clock taken
 * in startup() and drop the substream reference used by the ISR.
 */
static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *cpu_dai)
{
	struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);

	/* Disable all interrupts of this sub-block */
	regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);

	clk_disable_unprepare(sai->sai_ck);
	sai->substream = NULL;
}
658
659static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
660{
661 struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
662
663 sai->dma_params.addr = (dma_addr_t)(sai->phys_addr + STM_SAI_DR_REGX);
664 sai->dma_params.maxburst = 1;
665 /* Buswidth will be set by framework at runtime */
666 sai->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
667
668 if (STM_SAI_IS_PLAYBACK(sai))
669 snd_soc_dai_init_dma_data(cpu_dai, &sai->dma_params, NULL);
670 else
671 snd_soc_dai_init_dma_data(cpu_dai, NULL, &sai->dma_params);
672
673 return 0;
674}
675
/* DAI callbacks exposed to the ASoC core */
static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = {
	.set_sysclk	= stm32_sai_set_sysclk,
	.set_fmt	= stm32_sai_set_dai_fmt,
	.set_tdm_slot	= stm32_sai_set_dai_tdm_slot,
	.startup	= stm32_sai_startup,
	.hw_params	= stm32_sai_hw_params,
	.trigger	= stm32_sai_trigger,
	.shutdown	= stm32_sai_shutdown,
};
685
/* PCM constraints handed to the dmaengine PCM (stm32_sai_pcm_config) */
static const struct snd_pcm_hardware stm32_sai_pcm_hw = {
	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
	.buffer_bytes_max = 8 * PAGE_SIZE,
	.period_bytes_min = 1024, /* 5ms at 48kHz */
	.period_bytes_max = PAGE_SIZE,
	.periods_min = 2,
	.periods_max = 8,
};
694
/* Playback DAI template (used when DT provides a "tx" DMA channel) */
static struct snd_soc_dai_driver stm32_sai_playback_dai[] = {
{
		.probe = stm32_sai_dai_probe,
		.id = 1, /* avoid call to fmt_single_name() */
		.playback = {
			.channels_min = 1,
			.channels_max = 2,
			.rate_min = 8000,
			.rate_max = 192000,
			.rates = SNDRV_PCM_RATE_CONTINUOUS,
			/* DMA does not support 24 bits transfers */
			.formats =
				SNDRV_PCM_FMTBIT_S8 |
				SNDRV_PCM_FMTBIT_S16_LE |
				SNDRV_PCM_FMTBIT_S32_LE,
		},
		.ops = &stm32_sai_pcm_dai_ops,
	}
};
714
/* Capture DAI template (used when DT provides an "rx" DMA channel) */
static struct snd_soc_dai_driver stm32_sai_capture_dai[] = {
{
		.probe = stm32_sai_dai_probe,
		.id = 1, /* avoid call to fmt_single_name() */
		.capture = {
			.channels_min = 1,
			.channels_max = 2,
			.rate_min = 8000,
			.rate_max = 192000,
			.rates = SNDRV_PCM_RATE_CONTINUOUS,
			/* DMA does not support 24 bits transfers */
			.formats =
				SNDRV_PCM_FMTBIT_S8 |
				SNDRV_PCM_FMTBIT_S16_LE |
				SNDRV_PCM_FMTBIT_S32_LE,
		},
		.ops = &stm32_sai_pcm_dai_ops,
	}
};
734
/* Generic dmaengine PCM configuration for both directions */
static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config = {
	.pcm_hardware = &stm32_sai_pcm_hw,
	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
739
/* ASoC component registered for each SAI sub-block */
static const struct snd_soc_component_driver stm32_component = {
	.name = "stm32-sai",
};
743
/* DT match table: the compatible string selects sub-block A or B */
static const struct of_device_id stm32_sai_sub_ids[] = {
	{ .compatible = "st,stm32-sai-sub-a",
	  .data = (void *)STM_SAI_A_ID},
	{ .compatible = "st,stm32-sai-sub-b",
	  .data = (void *)STM_SAI_B_ID},
	{}
};
MODULE_DEVICE_TABLE(of, stm32_sai_sub_ids);
752
753static int stm32_sai_sub_parse_of(struct platform_device *pdev,
754 struct stm32_sai_sub_data *sai)
755{
756 struct device_node *np = pdev->dev.of_node;
757 struct resource *res;
758 void __iomem *base;
759
760 if (!np)
761 return -ENODEV;
762
763 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
764
765 dev_err(&pdev->dev, "res %pr\n", res);
766
767 base = devm_ioremap_resource(&pdev->dev, res);
768 if (IS_ERR(base))
769 return PTR_ERR(base);
770
771 sai->phys_addr = res->start;
772 sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
773 &stm32_sai_sub_regmap_config);
774
775 /* Get direction property */
776 if (of_property_match_string(np, "dma-names", "tx") >= 0) {
777 sai->dir = SNDRV_PCM_STREAM_PLAYBACK;
778 } else if (of_property_match_string(np, "dma-names", "rx") >= 0) {
779 sai->dir = SNDRV_PCM_STREAM_CAPTURE;
780 } else {
781 dev_err(&pdev->dev, "Unsupported direction\n");
782 return -EINVAL;
783 }
784
785 sai->sai_ck = devm_clk_get(&pdev->dev, "sai_ck");
786 if (IS_ERR(sai->sai_ck)) {
787 dev_err(&pdev->dev, "missing kernel clock sai_ck\n");
788 return PTR_ERR(sai->sai_ck);
789 }
790
791 return 0;
792}
793
794static int stm32_sai_sub_dais_init(struct platform_device *pdev,
795 struct stm32_sai_sub_data *sai)
796{
797 sai->cpu_dai_drv = devm_kzalloc(&pdev->dev,
798 sizeof(struct snd_soc_dai_driver),
799 GFP_KERNEL);
800 if (!sai->cpu_dai_drv)
801 return -ENOMEM;
802
803 sai->cpu_dai_drv->name = dev_name(&pdev->dev);
804 if (STM_SAI_IS_PLAYBACK(sai)) {
805 memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
806 sizeof(stm32_sai_playback_dai));
807 sai->cpu_dai_drv->playback.stream_name = sai->cpu_dai_drv->name;
808 } else {
809 memcpy(sai->cpu_dai_drv, &stm32_sai_capture_dai,
810 sizeof(stm32_sai_capture_dai));
811 sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
812 }
813
814 return 0;
815}
816
/*
 * Probe one SAI sub-block: resolve the DT match data (A/B id), fetch
 * the parent driver data (shared irq, parent clocks), parse the DT
 * node, build the DAI driver, install the shared IRQ handler and
 * register the ASoC component and the dmaengine PCM.
 * All resources are devm-managed, so no remove callback is needed.
 */
static int stm32_sai_sub_probe(struct platform_device *pdev)
{
	struct stm32_sai_sub_data *sai;
	const struct of_device_id *of_id;
	int ret;

	sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
	if (!sai)
		return -ENOMEM;

	of_id = of_match_device(stm32_sai_sub_ids, &pdev->dev);
	if (!of_id)
		return -EINVAL;
	sai->id = (uintptr_t)of_id->data;

	sai->pdev = pdev;
	platform_set_drvdata(pdev, sai);

	/* Common data (irq, clk_x8k/clk_x11k) comes from the parent */
	sai->pdata = dev_get_drvdata(pdev->dev.parent);
	if (!sai->pdata) {
		dev_err(&pdev->dev, "Parent device data not available\n");
		return -EINVAL;
	}

	ret = stm32_sai_sub_parse_of(pdev, sai);
	if (ret)
		return ret;

	ret = stm32_sai_sub_dais_init(pdev, sai);
	if (ret)
		return ret;

	/* IRQ line is shared between the sub-blocks */
	ret = devm_request_irq(&pdev->dev, sai->pdata->irq, stm32_sai_isr,
			       IRQF_SHARED, dev_name(&pdev->dev), sai);
	if (ret) {
		dev_err(&pdev->dev, "irq request returned %d\n", ret);
		return ret;
	}

	ret = devm_snd_soc_register_component(&pdev->dev, &stm32_component,
					      sai->cpu_dai_drv, 1);
	if (ret)
		return ret;

	ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
					      &stm32_sai_pcm_config, 0);
	if (ret) {
		dev_err(&pdev->dev, "could not register pcm dma\n");
		return ret;
	}

	return 0;
}
870
/* Platform driver for the SAI sub-blocks (children of the SAI device) */
static struct platform_driver stm32_sai_sub_driver = {
	.driver = {
		.name = "st,stm32-sai-sub",
		.of_match_table = stm32_sai_sub_ids,
	},
	.probe = stm32_sai_sub_probe,
};

module_platform_driver(stm32_sai_sub_driver);

MODULE_DESCRIPTION("STM32 Soc SAI sub-block Interface");
MODULE_AUTHOR("Olivier Moysan, <olivier.moysan@st.com>");
MODULE_ALIAS("platform:st,stm32-sai-sub");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
index 72331332b72e..6c17c99c2c8d 100644
--- a/sound/soc/sunxi/sun8i-codec-analog.c
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -252,24 +252,15 @@ static const DECLARE_TLV_DB_RANGE(sun8i_codec_mic_gain_scale,
252); 252);
253 253
254static const struct snd_kcontrol_new sun8i_codec_common_controls[] = { 254static const struct snd_kcontrol_new sun8i_codec_common_controls[] = {
255 /* Mixer pre-gains */ 255 /* Mixer pre-gain */
256 SOC_SINGLE_TLV("Line In Playback Volume", SUN8I_ADDA_LINEIN_GCTRL,
257 SUN8I_ADDA_LINEIN_GCTRL_LINEING,
258 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
259 SOC_SINGLE_TLV("Mic1 Playback Volume", SUN8I_ADDA_MICIN_GCTRL, 256 SOC_SINGLE_TLV("Mic1 Playback Volume", SUN8I_ADDA_MICIN_GCTRL,
260 SUN8I_ADDA_MICIN_GCTRL_MIC1G, 257 SUN8I_ADDA_MICIN_GCTRL_MIC1G,
261 0x7, 0, sun8i_codec_out_mixer_pregain_scale), 258 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
262 SOC_SINGLE_TLV("Mic2 Playback Volume",
263 SUN8I_ADDA_MICIN_GCTRL, SUN8I_ADDA_MICIN_GCTRL_MIC2G,
264 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
265 259
266 /* Microphone Amp boost gains */ 260 /* Microphone Amp boost gain */
267 SOC_SINGLE_TLV("Mic1 Boost Volume", SUN8I_ADDA_MIC1G_MICBIAS_CTRL, 261 SOC_SINGLE_TLV("Mic1 Boost Volume", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
268 SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST, 0x7, 0, 262 SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST, 0x7, 0,
269 sun8i_codec_mic_gain_scale), 263 sun8i_codec_mic_gain_scale),
270 SOC_SINGLE_TLV("Mic2 Boost Volume", SUN8I_ADDA_MIC2G_CTRL,
271 SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST, 0x7, 0,
272 sun8i_codec_mic_gain_scale),
273 264
274 /* ADC */ 265 /* ADC */
275 SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN8I_ADDA_ADC_AP_EN, 266 SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN8I_ADDA_ADC_AP_EN,
@@ -295,12 +286,8 @@ static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
295 * stream widgets at the card level. 286 * stream widgets at the card level.
296 */ 287 */
297 288
298 /* Line In */ 289 /* Microphone input */
299 SND_SOC_DAPM_INPUT("LINEIN"),
300
301 /* Microphone inputs */
302 SND_SOC_DAPM_INPUT("MIC1"), 290 SND_SOC_DAPM_INPUT("MIC1"),
303 SND_SOC_DAPM_INPUT("MIC2"),
304 291
305 /* Microphone Bias */ 292 /* Microphone Bias */
306 SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL, 293 SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
@@ -310,8 +297,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
310 /* Mic input path */ 297 /* Mic input path */
311 SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN8I_ADDA_MIC1G_MICBIAS_CTRL, 298 SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
312 SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN, 0, NULL, 0), 299 SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN, 0, NULL, 0),
313 SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN8I_ADDA_MIC2G_CTRL,
314 SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN, 0, NULL, 0),
315 300
316 /* Mixers */ 301 /* Mixers */
317 SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC, 302 SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC,
@@ -335,35 +320,26 @@ static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
335static const struct snd_soc_dapm_route sun8i_codec_common_routes[] = { 320static const struct snd_soc_dapm_route sun8i_codec_common_routes[] = {
336 /* Microphone Routes */ 321 /* Microphone Routes */
337 { "Mic1 Amplifier", NULL, "MIC1"}, 322 { "Mic1 Amplifier", NULL, "MIC1"},
338 { "Mic2 Amplifier", NULL, "MIC2"},
339 323
340 /* Left Mixer Routes */ 324 /* Left Mixer Routes */
341 { "Left Mixer", "DAC Playback Switch", "Left DAC" }, 325 { "Left Mixer", "DAC Playback Switch", "Left DAC" },
342 { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" }, 326 { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
343 { "Left Mixer", "Line In Playback Switch", "LINEIN" },
344 { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" }, 327 { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
345 { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
346 328
347 /* Right Mixer Routes */ 329 /* Right Mixer Routes */
348 { "Right Mixer", "DAC Playback Switch", "Right DAC" }, 330 { "Right Mixer", "DAC Playback Switch", "Right DAC" },
349 { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" }, 331 { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
350 { "Right Mixer", "Line In Playback Switch", "LINEIN" },
351 { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" }, 332 { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
352 { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
353 333
354 /* Left ADC Mixer Routes */ 334 /* Left ADC Mixer Routes */
355 { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" }, 335 { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
356 { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" }, 336 { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
357 { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
358 { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" }, 337 { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
359 { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
360 338
361 /* Right ADC Mixer Routes */ 339 /* Right ADC Mixer Routes */
362 { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" }, 340 { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
363 { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" }, 341 { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
364 { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
365 { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" }, 342 { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
366 { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
367 343
368 /* ADC Routes */ 344 /* ADC Routes */
369 { "Left ADC", NULL, "Left ADC Mixer" }, 345 { "Left ADC", NULL, "Left ADC Mixer" },
@@ -498,6 +474,61 @@ static int sun8i_codec_add_hmic(struct snd_soc_component *cmpnt)
498 return ret; 474 return ret;
499} 475}
500 476
477/* line in specific controls, widgets and routes */
478static const struct snd_kcontrol_new sun8i_codec_linein_controls[] = {
479 /* Mixer pre-gain */
480 SOC_SINGLE_TLV("Line In Playback Volume", SUN8I_ADDA_LINEIN_GCTRL,
481 SUN8I_ADDA_LINEIN_GCTRL_LINEING,
482 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
483};
484
485static const struct snd_soc_dapm_widget sun8i_codec_linein_widgets[] = {
486 /* Line input */
487 SND_SOC_DAPM_INPUT("LINEIN"),
488};
489
490static const struct snd_soc_dapm_route sun8i_codec_linein_routes[] = {
491 { "Left Mixer", "Line In Playback Switch", "LINEIN" },
492
493 { "Right Mixer", "Line In Playback Switch", "LINEIN" },
494
495 { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
496
497 { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
498};
499
500static int sun8i_codec_add_linein(struct snd_soc_component *cmpnt)
501{
502 struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
503 struct device *dev = cmpnt->dev;
504 int ret;
505
506 ret = snd_soc_add_component_controls(cmpnt,
507 sun8i_codec_linein_controls,
508 ARRAY_SIZE(sun8i_codec_linein_controls));
509 if (ret) {
510 dev_err(dev, "Failed to add Line In controls: %d\n", ret);
511 return ret;
512 }
513
514 ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_linein_widgets,
515 ARRAY_SIZE(sun8i_codec_linein_widgets));
516 if (ret) {
517 dev_err(dev, "Failed to add Line In DAPM widgets: %d\n", ret);
518 return ret;
519 }
520
521 ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_linein_routes,
522 ARRAY_SIZE(sun8i_codec_linein_routes));
523 if (ret) {
524 dev_err(dev, "Failed to add Line In DAPM routes: %d\n", ret);
525 return ret;
526 }
527
528 return 0;
529}
530
531
501/* line out specific controls, widgets and routes */ 532/* line out specific controls, widgets and routes */
502static const DECLARE_TLV_DB_RANGE(sun8i_codec_lineout_vol_scale, 533static const DECLARE_TLV_DB_RANGE(sun8i_codec_lineout_vol_scale,
503 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1), 534 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
@@ -578,19 +609,90 @@ static int sun8i_codec_add_lineout(struct snd_soc_component *cmpnt)
578 return 0; 609 return 0;
579} 610}
580 611
612/* mic2 specific controls, widgets and routes */
613static const struct snd_kcontrol_new sun8i_codec_mic2_controls[] = {
614 /* Mixer pre-gain */
615 SOC_SINGLE_TLV("Mic2 Playback Volume",
616 SUN8I_ADDA_MICIN_GCTRL, SUN8I_ADDA_MICIN_GCTRL_MIC2G,
617 0x7, 0, sun8i_codec_out_mixer_pregain_scale),
618
619 /* Microphone Amp boost gain */
620 SOC_SINGLE_TLV("Mic2 Boost Volume", SUN8I_ADDA_MIC2G_CTRL,
621 SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST, 0x7, 0,
622 sun8i_codec_mic_gain_scale),
623};
624
625static const struct snd_soc_dapm_widget sun8i_codec_mic2_widgets[] = {
626 /* Microphone input */
627 SND_SOC_DAPM_INPUT("MIC2"),
628
629 /* Mic input path */
630 SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN8I_ADDA_MIC2G_CTRL,
631 SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN, 0, NULL, 0),
632};
633
634static const struct snd_soc_dapm_route sun8i_codec_mic2_routes[] = {
635 { "Mic2 Amplifier", NULL, "MIC2"},
636
637 { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
638
639 { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
640
641 { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
642
643 { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
644};
645
646static int sun8i_codec_add_mic2(struct snd_soc_component *cmpnt)
647{
648 struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
649 struct device *dev = cmpnt->dev;
650 int ret;
651
652 ret = snd_soc_add_component_controls(cmpnt,
653 sun8i_codec_mic2_controls,
654 ARRAY_SIZE(sun8i_codec_mic2_controls));
655 if (ret) {
656 dev_err(dev, "Failed to add MIC2 controls: %d\n", ret);
657 return ret;
658 }
659
660 ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_mic2_widgets,
661 ARRAY_SIZE(sun8i_codec_mic2_widgets));
662 if (ret) {
663 dev_err(dev, "Failed to add MIC2 DAPM widgets: %d\n", ret);
664 return ret;
665 }
666
667 ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_mic2_routes,
668 ARRAY_SIZE(sun8i_codec_mic2_routes));
669 if (ret) {
670 dev_err(dev, "Failed to add MIC2 DAPM routes: %d\n", ret);
671 return ret;
672 }
673
674 return 0;
675}
676
581struct sun8i_codec_analog_quirks { 677struct sun8i_codec_analog_quirks {
582 bool has_headphone; 678 bool has_headphone;
583 bool has_hmic; 679 bool has_hmic;
680 bool has_linein;
584 bool has_lineout; 681 bool has_lineout;
682 bool has_mic2;
585}; 683};
586 684
587static const struct sun8i_codec_analog_quirks sun8i_a23_quirks = { 685static const struct sun8i_codec_analog_quirks sun8i_a23_quirks = {
588 .has_headphone = true, 686 .has_headphone = true,
589 .has_hmic = true, 687 .has_hmic = true,
688 .has_linein = true,
689 .has_mic2 = true,
590}; 690};
591 691
592static const struct sun8i_codec_analog_quirks sun8i_h3_quirks = { 692static const struct sun8i_codec_analog_quirks sun8i_h3_quirks = {
693 .has_linein = true,
593 .has_lineout = true, 694 .has_lineout = true,
695 .has_mic2 = true,
594}; 696};
595 697
596static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt) 698static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
@@ -620,12 +722,24 @@ static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
620 return ret; 722 return ret;
621 } 723 }
622 724
725 if (quirks->has_linein) {
726 ret = sun8i_codec_add_linein(cmpnt);
727 if (ret)
728 return ret;
729 }
730
623 if (quirks->has_lineout) { 731 if (quirks->has_lineout) {
624 ret = sun8i_codec_add_lineout(cmpnt); 732 ret = sun8i_codec_add_lineout(cmpnt);
625 if (ret) 733 if (ret)
626 return ret; 734 return ret;
627 } 735 }
628 736
737 if (quirks->has_mic2) {
738 ret = sun8i_codec_add_mic2(cmpnt);
739 if (ret)
740 return ret;
741 }
742
629 return 0; 743 return 0;
630} 744}
631 745
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 7527ba29a5a0..5723c3404f6b 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -290,12 +290,10 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
290 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 290 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
291 291
292 /* DAC Mixers */ 292 /* DAC Mixers */
293 SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0, 293 SOC_MIXER_ARRAY("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
294 sun8i_dac_mixer_controls, 294 sun8i_dac_mixer_controls),
295 ARRAY_SIZE(sun8i_dac_mixer_controls)), 295 SOC_MIXER_ARRAY("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
296 SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0, 296 sun8i_dac_mixer_controls),
297 sun8i_dac_mixer_controls,
298 ARRAY_SIZE(sun8i_dac_mixer_controls)),
299 297
300 /* Clocks */ 298 /* Clocks */
301 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, 299 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index a68368edab9c..affad46bf188 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -318,7 +318,6 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
318 ac97 = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_ac97), 318 ac97 = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_ac97),
319 GFP_KERNEL); 319 GFP_KERNEL);
320 if (!ac97) { 320 if (!ac97) {
321 dev_err(&pdev->dev, "Can't allocate tegra20_ac97\n");
322 ret = -ENOMEM; 321 ret = -ENOMEM;
323 goto err; 322 goto err;
324 } 323 }
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index 89add13c31cf..4024e3abbeed 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -41,6 +41,7 @@ static inline void tegra20_das_write(u32 reg, u32 val)
41static inline u32 tegra20_das_read(u32 reg) 41static inline u32 tegra20_das_read(u32 reg)
42{ 42{
43 u32 val; 43 u32 val;
44
44 regmap_read(das->regmap, reg, &val); 45 regmap_read(das->regmap, reg, &val);
45 return val; 46 return val;
46} 47}
@@ -142,7 +143,6 @@ static int tegra20_das_probe(struct platform_device *pdev)
142 143
143 das = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_das), GFP_KERNEL); 144 das = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_das), GFP_KERNEL);
144 if (!das) { 145 if (!das) {
145 dev_err(&pdev->dev, "Can't allocate tegra20_das\n");
146 ret = -ENOMEM; 146 ret = -ENOMEM;
147 goto err; 147 goto err;
148 } 148 }
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 14106fa82bca..26253c2849e7 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -345,7 +345,6 @@ static int tegra20_i2s_platform_probe(struct platform_device *pdev)
345 345
346 i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_i2s), GFP_KERNEL); 346 i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_i2s), GFP_KERNEL);
347 if (!i2s) { 347 if (!i2s) {
348 dev_err(&pdev->dev, "Can't allocate tegra20_i2s\n");
349 ret = -ENOMEM; 348 ret = -ENOMEM;
350 goto err; 349 goto err;
351 } 350 }
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index a0c3640572b9..767c0491e11a 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -271,10 +271,9 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
271 271
272 spdif = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_spdif), 272 spdif = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_spdif),
273 GFP_KERNEL); 273 GFP_KERNEL);
274 if (!spdif) { 274 if (!spdif)
275 dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
276 return -ENOMEM; 275 return -ENOMEM;
277 } 276
278 dev_set_drvdata(&pdev->dev, spdif); 277 dev_set_drvdata(&pdev->dev, spdif);
279 278
280 spdif->clk_spdif_out = devm_clk_get(&pdev->dev, "spdif_out"); 279 spdif->clk_spdif_out = devm_clk_get(&pdev->dev, "spdif_out");
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index fef3b9a21a66..8c10ae7982ba 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -41,6 +41,7 @@ static inline void tegra30_apbif_write(u32 reg, u32 val)
41static inline u32 tegra30_apbif_read(u32 reg) 41static inline u32 tegra30_apbif_read(u32 reg)
42{ 42{
43 u32 val; 43 u32 val;
44
44 regmap_read(ahub->regmap_apbif, reg, &val); 45 regmap_read(ahub->regmap_apbif, reg, &val);
45 return val; 46 return val;
46} 47}
@@ -560,10 +561,8 @@ static int tegra30_ahub_probe(struct platform_device *pdev)
560 561
561 ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub), 562 ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub),
562 GFP_KERNEL); 563 GFP_KERNEL);
563 if (!ahub) { 564 if (!ahub)
564 dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n");
565 return -ENOMEM; 565 return -ENOMEM;
566 }
567 dev_set_drvdata(&pdev->dev, ahub); 566 dev_set_drvdata(&pdev->dev, ahub);
568 567
569 ahub->soc_data = soc_data; 568 ahub->soc_data = soc_data;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 8e55583aa104..b2b279c96029 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -385,7 +385,6 @@ static int tegra30_i2s_platform_probe(struct platform_device *pdev)
385 385
386 i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_i2s), GFP_KERNEL); 386 i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_i2s), GFP_KERNEL);
387 if (!i2s) { 387 if (!i2s) {
388 dev_err(&pdev->dev, "Can't allocate tegra30_i2s\n");
389 ret = -ENOMEM; 388 ret = -ENOMEM;
390 goto err; 389 goto err;
391 } 390 }
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index eead6e7f205b..0509902512cc 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -169,10 +169,8 @@ static int tegra_alc5632_probe(struct platform_device *pdev)
169 169
170 alc5632 = devm_kzalloc(&pdev->dev, 170 alc5632 = devm_kzalloc(&pdev->dev,
171 sizeof(struct tegra_alc5632), GFP_KERNEL); 171 sizeof(struct tegra_alc5632), GFP_KERNEL);
172 if (!alc5632) { 172 if (!alc5632)
173 dev_err(&pdev->dev, "Can't allocate tegra_alc5632\n");
174 return -ENOMEM; 173 return -ENOMEM;
175 }
176 174
177 card->dev = &pdev->dev; 175 card->dev = &pdev->dev;
178 platform_set_drvdata(pdev, card); 176 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index a403db6d563e..c34a54d6e812 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -225,10 +225,8 @@ static int tegra_max98090_probe(struct platform_device *pdev)
225 225
226 machine = devm_kzalloc(&pdev->dev, 226 machine = devm_kzalloc(&pdev->dev,
227 sizeof(struct tegra_max98090), GFP_KERNEL); 227 sizeof(struct tegra_max98090), GFP_KERNEL);
228 if (!machine) { 228 if (!machine)
229 dev_err(&pdev->dev, "Can't allocate tegra_max98090\n");
230 return -ENOMEM; 229 return -ENOMEM;
231 }
232 230
233 card->dev = &pdev->dev; 231 card->dev = &pdev->dev;
234 platform_set_drvdata(pdev, card); 232 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index 25b9fc03ba62..93a356802345 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -170,10 +170,8 @@ static int tegra_rt5640_probe(struct platform_device *pdev)
170 170
171 machine = devm_kzalloc(&pdev->dev, 171 machine = devm_kzalloc(&pdev->dev,
172 sizeof(struct tegra_rt5640), GFP_KERNEL); 172 sizeof(struct tegra_rt5640), GFP_KERNEL);
173 if (!machine) { 173 if (!machine)
174 dev_err(&pdev->dev, "Can't allocate tegra_rt5640\n");
175 return -ENOMEM; 174 return -ENOMEM;
176 }
177 175
178 card->dev = &pdev->dev; 176 card->dev = &pdev->dev;
179 platform_set_drvdata(pdev, card); 177 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 4bbab098f50b..6dda01f69983 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -120,10 +120,8 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
120 120
121 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_sgtl5000), 121 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_sgtl5000),
122 GFP_KERNEL); 122 GFP_KERNEL);
123 if (!machine) { 123 if (!machine)
124 dev_err(&pdev->dev, "Can't allocate tegra_sgtl5000 struct\n");
125 return -ENOMEM; 124 return -ENOMEM;
126 }
127 125
128 card->dev = &pdev->dev; 126 card->dev = &pdev->dev;
129 platform_set_drvdata(pdev, card); 127 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index bdedd1028569..d0ab0026a4cd 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -128,10 +128,8 @@ static int tegra_wm8753_driver_probe(struct platform_device *pdev)
128 128
129 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8753), 129 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8753),
130 GFP_KERNEL); 130 GFP_KERNEL);
131 if (!machine) { 131 if (!machine)
132 dev_err(&pdev->dev, "Can't allocate tegra_wm8753 struct\n");
133 return -ENOMEM; 132 return -ENOMEM;
134 }
135 133
136 card->dev = &pdev->dev; 134 card->dev = &pdev->dev;
137 platform_set_drvdata(pdev, card); 135 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 2013e9c4bba0..dbfb49298ae8 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -248,10 +248,8 @@ static int tegra_wm8903_driver_probe(struct platform_device *pdev)
248 248
249 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8903), 249 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8903),
250 GFP_KERNEL); 250 GFP_KERNEL);
251 if (!machine) { 251 if (!machine)
252 dev_err(&pdev->dev, "Can't allocate tegra_wm8903 struct\n");
253 return -ENOMEM; 252 return -ENOMEM;
254 }
255 253
256 card->dev = &pdev->dev; 254 card->dev = &pdev->dev;
257 platform_set_drvdata(pdev, card); 255 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
index 6492f8143ff1..c9cd22432627 100644
--- a/sound/soc/tegra/tegra_wm9712.c
+++ b/sound/soc/tegra/tegra_wm9712.c
@@ -77,10 +77,8 @@ static int tegra_wm9712_driver_probe(struct platform_device *pdev)
77 77
78 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm9712), 78 machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm9712),
79 GFP_KERNEL); 79 GFP_KERNEL);
80 if (!machine) { 80 if (!machine)
81 dev_err(&pdev->dev, "Can't allocate tegra_wm9712 struct\n");
82 return -ENOMEM; 81 return -ENOMEM;
83 }
84 82
85 card->dev = &pdev->dev; 83 card->dev = &pdev->dev;
86 platform_set_drvdata(pdev, card); 84 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 870f84ab5005..c9dcad9bb931 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -123,10 +123,8 @@ static int tegra_snd_trimslice_probe(struct platform_device *pdev)
123 123
124 trimslice = devm_kzalloc(&pdev->dev, sizeof(struct tegra_trimslice), 124 trimslice = devm_kzalloc(&pdev->dev, sizeof(struct tegra_trimslice),
125 GFP_KERNEL); 125 GFP_KERNEL);
126 if (!trimslice) { 126 if (!trimslice)
127 dev_err(&pdev->dev, "Can't allocate tegra_trimslice\n");
128 return -ENOMEM; 127 return -ENOMEM;
129 }
130 128
131 card->dev = &pdev->dev; 129 card->dev = &pdev->dev;
132 platform_set_drvdata(pdev, card); 130 platform_set_drvdata(pdev, card);
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index a8f705bb60dc..7912bf09dc4d 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -206,7 +206,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
206static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 206static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
207{ 207{
208 struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; 208 struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
209 struct txx9aclc_plat_drvdata *drvdata =txx9aclc_drvdata; 209 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
210 void __iomem *base = drvdata->base; 210 void __iomem *base = drvdata->base;
211 unsigned long flags; 211 unsigned long flags;
212 int ret = 0; 212 int ret = 0;
@@ -340,7 +340,7 @@ static bool filter(struct dma_chan *chan, void *param)
340static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev, 340static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
341 struct txx9aclc_dmadata *dmadata) 341 struct txx9aclc_dmadata *dmadata)
342{ 342{
343 struct txx9aclc_plat_drvdata *drvdata =txx9aclc_drvdata; 343 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
344 struct txx9dmac_slave *ds = &dmadata->dma_slave; 344 struct txx9dmac_slave *ds = &dmadata->dma_slave;
345 dma_cap_mask_t mask; 345 dma_cap_mask_t mask;
346 346
@@ -392,6 +392,7 @@ static int txx9aclc_pcm_remove(struct snd_soc_platform *platform)
392 for (i = 0; i < 2; i++) { 392 for (i = 0; i < 2; i++) {
393 struct txx9aclc_dmadata *dmadata = &dev->dmadata[i]; 393 struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
394 struct dma_chan *chan = dmadata->dma_chan; 394 struct dma_chan *chan = dmadata->dma_chan;
395
395 if (chan) { 396 if (chan) {
396 dmadata->frag_count = -1; 397 dmadata->frag_count = -1;
397 dmaengine_terminate_all(chan); 398 dmaengine_terminate_all(chan);
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index ba9fc099cf67..b50f68a439ce 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -33,7 +33,6 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
33 .stream_name = "ab8500_0", 33 .stream_name = "ab8500_0",
34 .cpu_dai_name = "ux500-msp-i2s.1", 34 .cpu_dai_name = "ux500-msp-i2s.1",
35 .codec_dai_name = "ab8500-codec-dai.0", 35 .codec_dai_name = "ab8500-codec-dai.0",
36 .platform_name = "ux500-msp-i2s.1",
37 .codec_name = "ab8500-codec.0", 36 .codec_name = "ab8500-codec.0",
38 .init = mop500_ab8500_machine_init, 37 .init = mop500_ab8500_machine_init,
39 .ops = mop500_ab8500_ops, 38 .ops = mop500_ab8500_ops,
@@ -43,7 +42,6 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
43 .stream_name = "ab8500_1", 42 .stream_name = "ab8500_1",
44 .cpu_dai_name = "ux500-msp-i2s.3", 43 .cpu_dai_name = "ux500-msp-i2s.3",
45 .codec_dai_name = "ab8500-codec-dai.1", 44 .codec_dai_name = "ab8500-codec-dai.1",
46 .platform_name = "ux500-msp-i2s.3",
47 .codec_name = "ab8500-codec.0", 45 .codec_name = "ab8500-codec.0",
48 .init = NULL, 46 .init = NULL,
49 .ops = mop500_ab8500_ops, 47 .ops = mop500_ab8500_ops,
@@ -87,8 +85,6 @@ static int mop500_of_probe(struct platform_device *pdev,
87 for (i = 0; i < 2; i++) { 85 for (i = 0; i < 2; i++) {
88 mop500_dai_links[i].cpu_of_node = msp_np[i]; 86 mop500_dai_links[i].cpu_of_node = msp_np[i];
89 mop500_dai_links[i].cpu_dai_name = NULL; 87 mop500_dai_links[i].cpu_dai_name = NULL;
90 mop500_dai_links[i].platform_of_node = msp_np[i];
91 mop500_dai_links[i].platform_name = NULL;
92 mop500_dai_links[i].codec_of_node = codec_np; 88 mop500_dai_links[i].codec_of_node = codec_np;
93 mop500_dai_links[i].codec_name = NULL; 89 mop500_dai_links[i].codec_name = NULL;
94 } 90 }
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index b343efd9be5b..ec5152aa3f6e 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -133,6 +133,7 @@ static int setup_pcm_framing(struct snd_soc_dai *dai, unsigned int rate,
133 struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); 133 struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev);
134 134
135 u32 frame_length = MSP_FRAME_LEN_1; 135 u32 frame_length = MSP_FRAME_LEN_1;
136
136 prot_desc->frame_width = 0; 137 prot_desc->frame_width = 0;
137 138
138 switch (drvdata->slots) { 139 switch (drvdata->slots) {
@@ -482,7 +483,8 @@ static int ux500_msp_dai_prepare(struct snd_pcm_substream *substream,
482 if ((drvdata->fmt & SND_SOC_DAIFMT_MASTER_MASK) && 483 if ((drvdata->fmt & SND_SOC_DAIFMT_MASTER_MASK) &&
483 (drvdata->msp->f_bitclk > 19200000)) { 484 (drvdata->msp->f_bitclk > 19200000)) {
484 /* If the bit-clock is higher than 19.2MHz, Vape should be 485 /* If the bit-clock is higher than 19.2MHz, Vape should be
485 * run in 100% OPP. Only when bit-clock is used (MSP master) */ 486 * run in 100% OPP. Only when bit-clock is used (MSP master)
487 */
486 prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, 488 prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
487 "ux500-msp-i2s", 100); 489 "ux500-msp-i2s", 100);
488 drvdata->vape_opp_constraint = 1; 490 drvdata->vape_opp_constraint = 1;
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index 959d7b4edf56..bd5266aca0f1 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -604,7 +604,6 @@ int ux500_msp_i2s_trigger(struct ux500_msp *msp, int cmd, int direction)
604 break; 604 break;
605 default: 605 default:
606 return -EINVAL; 606 return -EINVAL;
607 break;
608 } 607 }
609 608
610 return 0; 609 return 0;
diff --git a/sound/soc/zte/Kconfig b/sound/soc/zte/Kconfig
index 6d8a90d36315..75f67a5d23ea 100644
--- a/sound/soc/zte/Kconfig
+++ b/sound/soc/zte/Kconfig
@@ -15,3 +15,11 @@ config ZX_I2S
15 help 15 help
16 Say Y or M if you want to add support for codecs attached to the 16 Say Y or M if you want to add support for codecs attached to the
17 ZTE ZX I2S interface 17 ZTE ZX I2S interface
18
19config ZX_TDM
20 tristate "ZTE ZX TDM Driver Support"
21 depends on COMMON_CLK
22 select SND_SOC_GENERIC_DMAENGINE_PCM
23 help
24 Say Y or M if you want to add support for codecs attached to the
25 ZTE ZX TDM interface
diff --git a/sound/soc/zte/Makefile b/sound/soc/zte/Makefile
index 77768f5fd10c..1fc841acdfdd 100644
--- a/sound/soc/zte/Makefile
+++ b/sound/soc/zte/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_ZX_SPDIF) += zx-spdif.o 1obj-$(CONFIG_ZX_SPDIF) += zx-spdif.o
2obj-$(CONFIG_ZX_I2S) += zx-i2s.o 2obj-$(CONFIG_ZX_I2S) += zx-i2s.o
3obj-$(CONFIG_ZX_TDM) += zx-tdm.o
diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
new file mode 100644
index 000000000000..bd632cc503b3
--- /dev/null
+++ b/sound/soc/zte/zx-tdm.c
@@ -0,0 +1,461 @@
1/*
2 * ZTE's TDM driver
3 *
4 * Copyright (C) 2017 ZTE Ltd
5 *
6 * Author: Baoyou Xie <baoyou.xie@linaro.org>
7 *
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#include <linux/clk.h>
12#include <linux/io.h>
13#include <linux/mfd/syscon.h>
14#include <linux/module.h>
15#include <sound/dmaengine_pcm.h>
16#include <sound/pcm_params.h>
17#include <sound/soc.h>
18#include <sound/soc-dai.h>
19
/* TDM controller register map (byte offsets from regbase) */
#define REG_TIMING_CTRL 0x04
#define REG_TX_FIFO_CTRL 0x0C
#define REG_RX_FIFO_CTRL 0x10
#define REG_INT_EN 0x1C
#define REG_INT_STATUS 0x20
#define REG_DATABUF 0x24
#define REG_TS_MASK0 0x44
#define REG_PROCESS_CTRL 0x54

/* TX/RX FIFO control registers share the same bit layout */
#define FIFO_CTRL_TX_RST BIT(0)
#define FIFO_CTRL_RX_RST BIT(0)
/*
 * NOTE(review): "DEAGULT" is a typo for "DEFAULT"; the name is kept
 * because zx_tdm_tx_dma_en()/zx_tdm_rx_dma_en() reference it.
 */
#define DEAGULT_FIFO_THRES GENMASK(4, 2)

#define FIFO_CTRL_TX_DMA_EN BIT(1)
#define FIFO_CTRL_RX_DMA_EN BIT(1)

#define TX_FIFO_RST_MASK BIT(0)
#define RX_FIFO_RST_MASK BIT(0)

#define FIFOCTRL_TX_FIFO_RST BIT(0)
#define FIFOCTRL_RX_FIFO_RST BIT(0)

/* FIFO threshold field, bits [5:2] */
#define TXTH_MASK GENMASK(5, 2)
#define RXTH_MASK GENMASK(5, 2)

#define FIFOCTRL_THRESHOLD(x) ((x) << 2)

#define TIMING_MS_MASK BIT(1)
/*
 * 00: 8 clk cycles every timeslot
 * 01: 16 clk cycles every timeslot
 * 10: 32 clk cycles every timeslot
 */
#define TIMING_SYNC_WIDTH_MASK GENMASK(6, 5)
#define TIMING_WIDTH_SHIFT 5
#define TIMING_DEFAULT_WIDTH 0
#define TIMING_TS_WIDTH(x) ((x) << TIMING_WIDTH_SHIFT)
#define TIMING_WIDTH_FACTOR 8

#define TIMING_MASTER_MODE BIT(21)
#define TIMING_LSB_FIRST BIT(20)
#define TIMING_TS_NUM(x) (((x) - 1) << 7)
#define TIMING_CLK_SEL_MASK GENMASK(2, 0)
#define TIMING_CLK_SEL_DEF BIT(2)

#define PROCESS_TX_EN BIT(0)
#define PROCESS_RX_EN BIT(1)
#define PROCESS_TDM_EN BIT(2)
#define PROCESS_DISABLE_ALL 0

#define INT_DISABLE_ALL 0
#define INT_STATUS_MASK GENMASK(6, 0)

/* Per-instance state for one ZX TDM controller. */
struct zx_tdm_info {
	struct snd_dmaengine_dai_dma_data dma_playback;
	struct snd_dmaengine_dai_dma_data dma_capture;
	resource_size_t phy_addr;	/* physical base; DMA targets phy_addr + REG_DATABUF */
	void __iomem *regbase;		/* ioremapped register base */
	struct clk *dai_wclk;		/* word clock; rate set in hw_params when master */
	struct clk *dai_pclk;		/* bus clock */
	int master;			/* nonzero when the TDM block is clock master */
	struct device *dev;
};
83
84static inline u32 zx_tdm_readl(struct zx_tdm_info *tdm, u16 reg)
85{
86 return readl_relaxed(tdm->regbase + reg);
87}
88
89static inline void zx_tdm_writel(struct zx_tdm_info *tdm, u16 reg, u32 val)
90{
91 writel_relaxed(val, tdm->regbase + reg);
92}
93
94static void zx_tdm_tx_en(struct zx_tdm_info *tdm, bool on)
95{
96 unsigned long val;
97
98 val = zx_tdm_readl(tdm, REG_PROCESS_CTRL);
99 if (on)
100 val |= PROCESS_TX_EN | PROCESS_TDM_EN;
101 else
102 val &= ~(PROCESS_TX_EN | PROCESS_TDM_EN);
103 zx_tdm_writel(tdm, REG_PROCESS_CTRL, val);
104}
105
106static void zx_tdm_rx_en(struct zx_tdm_info *tdm, bool on)
107{
108 unsigned long val;
109
110 val = zx_tdm_readl(tdm, REG_PROCESS_CTRL);
111 if (on)
112 val |= PROCESS_RX_EN | PROCESS_TDM_EN;
113 else
114 val &= ~(PROCESS_RX_EN | PROCESS_TDM_EN);
115 zx_tdm_writel(tdm, REG_PROCESS_CTRL, val);
116}
117
118static void zx_tdm_tx_dma_en(struct zx_tdm_info *tdm, bool on)
119{
120 unsigned long val;
121
122 val = zx_tdm_readl(tdm, REG_TX_FIFO_CTRL);
123 val |= FIFO_CTRL_TX_RST | DEAGULT_FIFO_THRES;
124 if (on)
125 val |= FIFO_CTRL_TX_DMA_EN;
126 else
127 val &= ~FIFO_CTRL_TX_DMA_EN;
128 zx_tdm_writel(tdm, REG_TX_FIFO_CTRL, val);
129}
130
131static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on)
132{
133 unsigned long val;
134
135 val = zx_tdm_readl(tdm, REG_RX_FIFO_CTRL);
136 val |= FIFO_CTRL_RX_RST | DEAGULT_FIFO_THRES;
137 if (on)
138 val |= FIFO_CTRL_RX_DMA_EN;
139 else
140 val &= ~FIFO_CTRL_RX_DMA_EN;
141 zx_tdm_writel(tdm, REG_RX_FIFO_CTRL, val);
142}
143
#define ZX_TDM_RATES	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)

/*
 * Fix: the original OR'ed SNDRV_PCM_FORMAT_MU_LAW/A_LAW into this mask.
 * Those are format *indices* (small integers), not bitmask values; the
 * snd_soc_pcm_stream.formats field expects SNDRV_PCM_FMTBIT_* masks,
 * so the advertised formats were wrong.
 */
#define ZX_TDM_FMTBIT \
	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \
	SNDRV_PCM_FMTBIT_A_LAW)
149
150static int zx_tdm_dai_probe(struct snd_soc_dai *dai)
151{
152 struct zx_tdm_info *zx_tdm = dev_get_drvdata(dai->dev);
153
154 snd_soc_dai_set_drvdata(dai, zx_tdm);
155 zx_tdm->dma_playback.addr = zx_tdm->phy_addr + REG_DATABUF;
156 zx_tdm->dma_playback.maxburst = 16;
157 zx_tdm->dma_capture.addr = zx_tdm->phy_addr + REG_DATABUF;
158 zx_tdm->dma_capture.maxburst = 16;
159 snd_soc_dai_init_dma_data(dai, &zx_tdm->dma_playback,
160 &zx_tdm->dma_capture);
161 return 0;
162}
163
164static int zx_tdm_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
165{
166 struct zx_tdm_info *tdm = snd_soc_dai_get_drvdata(cpu_dai);
167 unsigned long val;
168
169 val = zx_tdm_readl(tdm, REG_TIMING_CTRL);
170 val &= ~(TIMING_SYNC_WIDTH_MASK | TIMING_MS_MASK);
171 val |= TIMING_DEFAULT_WIDTH << TIMING_WIDTH_SHIFT;
172
173 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
174 case SND_SOC_DAIFMT_CBM_CFM:
175 tdm->master = 1;
176 val |= TIMING_MASTER_MODE;
177 break;
178 case SND_SOC_DAIFMT_CBS_CFS:
179 tdm->master = 0;
180 val &= ~TIMING_MASTER_MODE;
181 break;
182 default:
183 dev_err(cpu_dai->dev, "Unknown master/slave format\n");
184 return -EINVAL;
185 }
186
187
188 zx_tdm_writel(tdm, REG_TIMING_CTRL, val);
189
190 return 0;
191}
192
193static int zx_tdm_hw_params(struct snd_pcm_substream *substream,
194 struct snd_pcm_hw_params *params,
195 struct snd_soc_dai *socdai)
196{
197 struct zx_tdm_info *tdm = snd_soc_dai_get_drvdata(socdai);
198 struct snd_dmaengine_dai_dma_data *dma_data;
199 unsigned int ts_width = TIMING_DEFAULT_WIDTH;
200 unsigned int ch_num = 32;
201 unsigned int mask = 0;
202 unsigned int ret = 0;
203 unsigned long val;
204
205 dma_data = snd_soc_dai_get_dma_data(socdai, substream);
206 dma_data->addr_width = ch_num >> 3;
207
208 switch (params_format(params)) {
209 case SNDRV_PCM_FORMAT_MU_LAW:
210 case SNDRV_PCM_FORMAT_A_LAW:
211 case SNDRV_PCM_FORMAT_S16_LE:
212 ts_width = 1;
213 break;
214 default:
215 ts_width = 0;
216 dev_err(socdai->dev, "Unknown data format\n");
217 return -EINVAL;
218 }
219
220 val = zx_tdm_readl(tdm, REG_TIMING_CTRL);
221 val |= TIMING_TS_WIDTH(ts_width) | TIMING_TS_NUM(1);
222 zx_tdm_writel(tdm, REG_TIMING_CTRL, val);
223 zx_tdm_writel(tdm, REG_TS_MASK0, mask);
224
225 if (tdm->master)
226 ret = clk_set_rate(tdm->dai_wclk,
227 params_rate(params) * TIMING_WIDTH_FACTOR * ch_num);
228
229 return ret;
230}
231
232static int zx_tdm_trigger(struct snd_pcm_substream *substream, int cmd,
233 struct snd_soc_dai *dai)
234{
235 int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
236 struct zx_tdm_info *zx_tdm = dev_get_drvdata(dai->dev);
237 unsigned int val;
238 int ret = 0;
239
240 switch (cmd) {
241 case SNDRV_PCM_TRIGGER_START:
242 if (capture) {
243 val = zx_tdm_readl(zx_tdm, REG_RX_FIFO_CTRL);
244 val |= FIFOCTRL_RX_FIFO_RST;
245 zx_tdm_writel(zx_tdm, REG_RX_FIFO_CTRL, val);
246
247 zx_tdm_rx_dma_en(zx_tdm, true);
248 } else {
249 val = zx_tdm_readl(zx_tdm, REG_TX_FIFO_CTRL);
250 val |= FIFOCTRL_TX_FIFO_RST;
251 zx_tdm_writel(zx_tdm, REG_TX_FIFO_CTRL, val);
252
253 zx_tdm_tx_dma_en(zx_tdm, true);
254 }
255 break;
256 case SNDRV_PCM_TRIGGER_RESUME:
257 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
258 if (capture)
259 zx_tdm_rx_en(zx_tdm, true);
260 else
261 zx_tdm_tx_en(zx_tdm, true);
262 break;
263 case SNDRV_PCM_TRIGGER_STOP:
264 if (capture)
265 zx_tdm_rx_dma_en(zx_tdm, false);
266 else
267 zx_tdm_tx_dma_en(zx_tdm, false);
268 break;
269 case SNDRV_PCM_TRIGGER_SUSPEND:
270 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
271 if (capture)
272 zx_tdm_rx_en(zx_tdm, false);
273 else
274 zx_tdm_tx_en(zx_tdm, false);
275 break;
276 default:
277 ret = -EINVAL;
278 break;
279 }
280
281 return ret;
282}
283
284static int zx_tdm_startup(struct snd_pcm_substream *substream,
285 struct snd_soc_dai *dai)
286{
287 struct zx_tdm_info *zx_tdm = dev_get_drvdata(dai->dev);
288 int ret;
289
290 ret = clk_prepare_enable(zx_tdm->dai_wclk);
291 if (ret)
292 return ret;
293
294 ret = clk_prepare_enable(zx_tdm->dai_pclk);
295 if (ret) {
296 clk_disable_unprepare(zx_tdm->dai_wclk);
297 return ret;
298 }
299
300 return 0;
301}
302
303static void zx_tdm_shutdown(struct snd_pcm_substream *substream,
304 struct snd_soc_dai *dai)
305{
306 struct zx_tdm_info *zx_tdm = dev_get_drvdata(dai->dev);
307
308 clk_disable_unprepare(zx_tdm->dai_pclk);
309 clk_disable_unprepare(zx_tdm->dai_wclk);
310}
311
312static struct snd_soc_dai_ops zx_tdm_dai_ops = {
313 .trigger = zx_tdm_trigger,
314 .hw_params = zx_tdm_hw_params,
315 .set_fmt = zx_tdm_set_fmt,
316 .startup = zx_tdm_startup,
317 .shutdown = zx_tdm_shutdown,
318};
319
/* Minimal ASoC component: only supplies the name used for matching. */
static const struct snd_soc_component_driver zx_tdm_component = {
	.name = "zx-tdm",
};
323
/*
 * Bring the controller to a known quiescent state: TX/RX/TDM disabled,
 * interrupts masked and acknowledged, both FIFOs reset and then
 * released with a threshold of 8, LSB-first timing on the default
 * clock source.
 */
static void zx_tdm_init_state(struct zx_tdm_info *tdm)
{
	unsigned int val;

	zx_tdm_writel(tdm, REG_PROCESS_CTRL, PROCESS_DISABLE_ALL);

	/* LSB-first, default clock selection */
	val = zx_tdm_readl(tdm, REG_TIMING_CTRL);
	val |= TIMING_LSB_FIRST;
	val &= ~TIMING_CLK_SEL_MASK;
	val |= TIMING_CLK_SEL_DEF;
	zx_tdm_writel(tdm, REG_TIMING_CTRL, val);

	zx_tdm_writel(tdm, REG_INT_EN, INT_DISABLE_ALL);
	/*
	 * write INT_STATUS register to clear it.
	 */
	zx_tdm_writel(tdm, REG_INT_STATUS, INT_STATUS_MASK);
	zx_tdm_writel(tdm, REG_RX_FIFO_CTRL, FIFOCTRL_RX_FIFO_RST);
	zx_tdm_writel(tdm, REG_TX_FIFO_CTRL, FIFOCTRL_TX_FIFO_RST);

	/* RX FIFO: clear the reset bit, set threshold to 8 */
	val = zx_tdm_readl(tdm, REG_RX_FIFO_CTRL);
	val &= ~(RXTH_MASK | RX_FIFO_RST_MASK);
	val |= FIFOCTRL_THRESHOLD(8);
	zx_tdm_writel(tdm, REG_RX_FIFO_CTRL, val);

	/* TX FIFO: clear the reset bit, set threshold to 8 */
	val = zx_tdm_readl(tdm, REG_TX_FIFO_CTRL);
	val &= ~(TXTH_MASK | TX_FIFO_RST_MASK);
	val |= FIFOCTRL_THRESHOLD(8);
	zx_tdm_writel(tdm, REG_TX_FIFO_CTRL, val);
}
354
/*
 * Single bidirectional DAI: 1-4 channels each way at 8/16 kHz in the
 * formats advertised by ZX_TDM_FMTBIT.
 */
static struct snd_soc_dai_driver zx_tdm_dai = {
	.name = "zx-tdm-dai",
	.id = 0,
	.probe = zx_tdm_dai_probe,
	.playback = {
		.channels_min = 1,
		.channels_max = 4,
		.rates = ZX_TDM_RATES,
		.formats = ZX_TDM_FMTBIT,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 4,
		.rates = ZX_TDM_RATES,
		.formats = ZX_TDM_FMTBIT,
	},
	.ops = &zx_tdm_dai_ops,
};
373
374static int zx_tdm_probe(struct platform_device *pdev)
375{
376 struct device *dev = &pdev->dev;
377 struct of_phandle_args out_args;
378 unsigned int dma_reg_offset;
379 struct zx_tdm_info *zx_tdm;
380 unsigned int dma_mask;
381 struct resource *res;
382 struct regmap *regmap_sysctrl;
383 int ret;
384
385 zx_tdm = devm_kzalloc(&pdev->dev, sizeof(*zx_tdm), GFP_KERNEL);
386 if (!zx_tdm)
387 return -ENOMEM;
388
389 zx_tdm->dev = dev;
390
391 zx_tdm->dai_wclk = devm_clk_get(&pdev->dev, "wclk");
392 if (IS_ERR(zx_tdm->dai_wclk)) {
393 dev_err(&pdev->dev, "Fail to get wclk\n");
394 return PTR_ERR(zx_tdm->dai_wclk);
395 }
396
397 zx_tdm->dai_pclk = devm_clk_get(&pdev->dev, "pclk");
398 if (IS_ERR(zx_tdm->dai_pclk)) {
399 dev_err(&pdev->dev, "Fail to get pclk\n");
400 return PTR_ERR(zx_tdm->dai_pclk);
401 }
402
403 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
404 zx_tdm->phy_addr = res->start;
405 zx_tdm->regbase = devm_ioremap_resource(&pdev->dev, res);
406 if (IS_ERR(zx_tdm->regbase))
407 return PTR_ERR(zx_tdm->regbase);
408
409 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
410 "zte,tdm-dma-sysctrl", 2, 0, &out_args);
411 if (ret) {
412 dev_err(&pdev->dev, "Fail to get zte,tdm-dma-sysctrl\n");
413 return ret;
414 }
415
416 dma_reg_offset = out_args.args[0];
417 dma_mask = out_args.args[1];
418 regmap_sysctrl = syscon_node_to_regmap(out_args.np);
419 if (IS_ERR(regmap_sysctrl)) {
420 of_node_put(out_args.np);
421 return PTR_ERR(regmap_sysctrl);
422 }
423
424 regmap_update_bits(regmap_sysctrl, dma_reg_offset, dma_mask, dma_mask);
425 of_node_put(out_args.np);
426
427 zx_tdm_init_state(zx_tdm);
428 platform_set_drvdata(pdev, zx_tdm);
429
430 ret = devm_snd_soc_register_component(&pdev->dev, &zx_tdm_component,
431 &zx_tdm_dai, 1);
432 if (ret) {
433 dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
434 return ret;
435 }
436
437 ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
438 if (ret)
439 dev_err(&pdev->dev, "Register platform PCM failed: %d\n", ret);
440
441 return ret;
442}
443
/* Device-tree match table. */
static const struct of_device_id zx_tdm_dt_ids[] = {
	{ .compatible = "zte,zx296718-tdm", },
	{}
};
MODULE_DEVICE_TABLE(of, zx_tdm_dt_ids);

static struct platform_driver tdm_driver = {
	.probe = zx_tdm_probe,
	.driver = {
		.name = "zx-tdm",
		.of_match_table = zx_tdm_dt_ids,
	},
};
module_platform_driver(tdm_driver);

MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>");
MODULE_DESCRIPTION("ZTE TDM DAI driver");
MODULE_LICENSE("GPL v2");
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..390d7c9685fd 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
168 .off = OFF, \ 168 .off = OFF, \
169 .imm = 0 }) 169 .imm = 0 })
170 170
171/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
172
173#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
174 ((struct bpf_insn) { \
175 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
176 .dst_reg = DST, \
177 .src_reg = SRC, \
178 .off = OFF, \
179 .imm = 0 })
180
171/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ 181/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
172 182
173#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ 183#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 273f21fa32b5..7aa57225cbf7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -130,6 +130,12 @@ static struct arch architectures[] = {
130 .name = "powerpc", 130 .name = "powerpc",
131 .init = powerpc__annotate_init, 131 .init = powerpc__annotate_init,
132 }, 132 },
133 {
134 .name = "s390",
135 .objdump = {
136 .comment_char = '#',
137 },
138 },
133}; 139};
134 140
135static void ins__delete(struct ins_operands *ops) 141static void ins__delete(struct ins_operands *ops)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa74ca03..39c2c7d067bb 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ out:
156 */ 156 */
157 case 0x2C: /* Westmere EP - Gulftown */ 157 case 0x2C: /* Westmere EP - Gulftown */
158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; 158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
159 break;
159 case 0x2A: /* SNB */ 160 case 0x2A: /* SNB */
160 case 0x2D: /* SNB Xeon */ 161 case 0x2D: /* SNB Xeon */
161 case 0x3A: /* IVB */ 162 case 0x3A: /* IVB */
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index fedca3285326..ccf2a69365cc 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
102\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 102\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
103\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
104\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
103\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 105\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
104\fBPkgWatt\fP Watts consumed by the whole package. 106\fBPkgWatt\fP Watts consumed by the whole package.
105\fBCorWatt\fP Watts consumed by the core part of the package. 107\fBCorWatt\fP Watts consumed by the core part of the package.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 828dccd3f01e..b11294730771 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1142 * it is possible for mperf's non-halted cycles + idle states 1142 * it is possible for mperf's non-halted cycles + idle states
1143 * to exceed TSC's all cycles: show c1 = 0% in that case. 1143 * to exceed TSC's all cycles: show c1 = 0% in that case.
1144 */ 1144 */
1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
1146 old->c1 = 0; 1146 old->c1 = 0;
1147 else { 1147 else {
1148 /* normal case, derive c1 */ 1148 /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
2485 2485
2486 if (fp == NULL) 2486 if (fp == NULL)
2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2488 else 2488 else {
2489 rewind(fp); 2489 rewind(fp);
2490 fflush(fp);
2491 }
2490 2492
2491 retval = fscanf(fp, "%d", &gfx_cur_mhz); 2493 retval = fscanf(fp, "%d", &gfx_cur_mhz);
2492 if (retval != 1) 2494 if (retval != 1)
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3111 return 0; 3113 return 0;
3112 3114
3113 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 3115 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
3114 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", 3116 "(high %d guar %d eff %d low %d)\n",
3115 cpu, msr, 3117 cpu, msr,
3116 (unsigned int)HWP_HIGHEST_PERF(msr), 3118 (unsigned int)HWP_HIGHEST_PERF(msr),
3117 (unsigned int)HWP_GUARANTEED_PERF(msr), 3119 (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3122 return 0; 3124 return 0;
3123 3125
3124 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 3126 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
3125 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", 3127 "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
3126 cpu, msr, 3128 cpu, msr,
3127 (unsigned int)(((msr) >> 0) & 0xff), 3129 (unsigned int)(((msr) >> 0) & 0xff),
3128 (unsigned int)(((msr) >> 8) & 0xff), 3130 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3136 return 0; 3138 return 0;
3137 3139
3138 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 3140 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
3139 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", 3141 "(min %d max %d des %d epp 0x%x window 0x%x)\n",
3140 cpu, msr, 3142 cpu, msr,
3141 (unsigned int)(((msr) >> 0) & 0xff), 3143 (unsigned int)(((msr) >> 0) & 0xff),
3142 (unsigned int)(((msr) >> 8) & 0xff), 3144 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
3353 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3355 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3354 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3356 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3355 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3357 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3356 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 3358 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
3357 BIC_PRESENT(BIC_PKG__); 3359 BIC_PRESENT(BIC_PKG__);
3358 BIC_PRESENT(BIC_RAM__); 3360 BIC_PRESENT(BIC_RAM__);
3359 if (rapl_joules) { 3361 if (rapl_joules) {
3360 BIC_PRESENT(BIC_Pkg_J); 3362 BIC_PRESENT(BIC_Pkg_J);
3361 BIC_PRESENT(BIC_Cor_J); 3363 BIC_PRESENT(BIC_Cor_J);
3362 BIC_PRESENT(BIC_RAM_J); 3364 BIC_PRESENT(BIC_RAM_J);
3365 BIC_PRESENT(BIC_GFX_J);
3363 } else { 3366 } else {
3364 BIC_PRESENT(BIC_PkgWatt); 3367 BIC_PRESENT(BIC_PkgWatt);
3365 BIC_PRESENT(BIC_CorWatt); 3368 BIC_PRESENT(BIC_CorWatt);
3366 BIC_PRESENT(BIC_RAMWatt); 3369 BIC_PRESENT(BIC_RAMWatt);
3370 BIC_PRESENT(BIC_GFXWatt);
3367 } 3371 }
3368 break; 3372 break;
3369 case INTEL_FAM6_HASWELL_X: /* HSX */ 3373 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3478int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3482int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3479{ 3483{
3480 unsigned long long msr; 3484 unsigned long long msr;
3481 unsigned int dts; 3485 unsigned int dts, dts2;
3482 int cpu; 3486 int cpu;
3483 3487
3484 if (!(do_dts || do_ptm)) 3488 if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3503 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 3507 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
3504 cpu, msr, tcc_activation_temp - dts); 3508 cpu, msr, tcc_activation_temp - dts);
3505 3509
3506#ifdef THERM_DEBUG
3507 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 3510 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
3508 return 0; 3511 return 0;
3509 3512
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3511 dts2 = (msr >> 8) & 0x7F; 3514 dts2 = (msr >> 8) & 0x7F;
3512 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3515 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3513 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3516 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3514#endif
3515 } 3517 }
3516 3518
3517 3519
3518 if (do_dts) { 3520 if (do_dts && debug) {
3519 unsigned int resolution; 3521 unsigned int resolution;
3520 3522
3521 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 3523 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3526 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 3528 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
3527 cpu, msr, tcc_activation_temp - dts, resolution); 3529 cpu, msr, tcc_activation_temp - dts, resolution);
3528 3530
3529#ifdef THERM_DEBUG
3530 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 3531 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
3531 return 0; 3532 return 0;
3532 3533
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3534 dts2 = (msr >> 8) & 0x7F; 3535 dts2 = (msr >> 8) & 0x7F;
3535 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3536 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3536 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3537 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3537#endif
3538 } 3538 }
3539 3539
3540 return 0; 3540 return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
4578} 4578}
4579 4579
4580void print_version() { 4580void print_version() {
4581 fprintf(outf, "turbostat version 17.02.24" 4581 fprintf(outf, "turbostat version 17.04.12"
4582 " - Len Brown <lenb@kernel.org>\n"); 4582 " - Len Brown <lenb@kernel.org>\n");
4583} 4583}
4584 4584
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 67531f47781b..9af09e8099c0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,22 +1,30 @@
1LIBDIR := ../../../lib 1LIBDIR := ../../../lib
2BPFOBJ := $(LIBDIR)/bpf/bpf.o 2BPFDIR := $(LIBDIR)/bpf
3APIDIR := ../../../include/uapi
4GENDIR := ../../../../include/generated
5GENHDR := $(GENDIR)/autoconf.h
3 6
4CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ) 7ifneq ($(wildcard $(GENHDR)),)
8 GENFLAGS := -DHAVE_GENHDR
9endif
10
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
12LDLIBS += -lcap
5 13
6TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
7 15
8TEST_PROGS := test_kmod.sh 16TEST_PROGS := test_kmod.sh
9 17
10all: $(TEST_GEN_PROGS) 18include ../lib.mk
19
20BPFOBJ := $(OUTPUT)/bpf.o
21
22$(TEST_GEN_PROGS): $(BPFOBJ)
11 23
12.PHONY: all clean force 24.PHONY: force
13 25
14# force a rebuild of BPFOBJ when its dependencies are updated 26# force a rebuild of BPFOBJ when its dependencies are updated
15force: 27force:
16 28
17$(BPFOBJ): force 29$(BPFOBJ): force
18 $(MAKE) -C $(dir $(BPFOBJ)) 30 $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
19
20$(test_objs): $(BPFOBJ)
21
22include ../lib.mk
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index cada17ac00b8..20f1871874df 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); 80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
81 key = 2; 81 key = 2;
82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
83 key = 1; 83 key = 3;
84 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 84 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
85 errno == E2BIG);
85 86
86 /* Check that key = 0 doesn't exist. */ 87 /* Check that key = 0 doesn't exist. */
87 key = 0; 88 key = 0;
@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
110 close(fd); 111 close(fd);
111} 112}
112 113
114static void test_hashmap_sizes(int task, void *data)
115{
116 int fd, i, j;
117
118 for (i = 1; i <= 512; i <<= 1)
119 for (j = 1; j <= 1 << 18; j <<= 1) {
120 fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
121 2, map_flags);
122 if (fd < 0) {
123 printf("Failed to create hashmap key=%d value=%d '%s'\n",
124 i, j, strerror(errno));
125 exit(1);
126 }
127 close(fd);
128 usleep(10); /* give kernel time to destroy */
129 }
130}
131
113static void test_hashmap_percpu(int task, void *data) 132static void test_hashmap_percpu(int task, void *data)
114{ 133{
115 unsigned int nr_cpus = bpf_num_possible_cpus(); 134 unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -263,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
263{ 282{
264 unsigned int nr_cpus = bpf_num_possible_cpus(); 283 unsigned int nr_cpus = bpf_num_possible_cpus();
265 int key, next_key, fd, i; 284 int key, next_key, fd, i;
266 long values[nr_cpus]; 285 long long values[nr_cpus];
267 286
268 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 287 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
269 sizeof(values[0]), 2, 0); 288 sizeof(values[0]), 2, 0);
@@ -317,8 +336,11 @@ static void test_arraymap_percpu(int task, void *data)
317static void test_arraymap_percpu_many_keys(void) 336static void test_arraymap_percpu_many_keys(void)
318{ 337{
319 unsigned int nr_cpus = bpf_num_possible_cpus(); 338 unsigned int nr_cpus = bpf_num_possible_cpus();
320 unsigned int nr_keys = 20000; 339 /* nr_keys is not too large otherwise the test stresses percpu
321 long values[nr_cpus]; 340 * allocator more than anything else
341 */
342 unsigned int nr_keys = 2000;
343 long long values[nr_cpus];
322 int key, fd, i; 344 int key, fd, i;
323 345
324 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 346 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
@@ -419,6 +441,7 @@ static void test_map_stress(void)
419{ 441{
420 run_parallel(100, test_hashmap, NULL); 442 run_parallel(100, test_hashmap, NULL);
421 run_parallel(100, test_hashmap_percpu, NULL); 443 run_parallel(100, test_hashmap_percpu, NULL);
444 run_parallel(100, test_hashmap_sizes, NULL);
422 445
423 run_parallel(100, test_arraymap, NULL); 446 run_parallel(100, test_arraymap, NULL);
424 run_parallel(100, test_arraymap_percpu, NULL); 447 run_parallel(100, test_arraymap_percpu, NULL);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d1555e4240c0..c848e90b6421 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -30,6 +30,14 @@
30 30
31#include <bpf/bpf.h> 31#include <bpf/bpf.h>
32 32
33#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
33#include "../../../include/linux/filter.h" 41#include "../../../include/linux/filter.h"
34 42
35#ifndef ARRAY_SIZE 43#ifndef ARRAY_SIZE
@@ -39,6 +47,8 @@
39#define MAX_INSNS 512 47#define MAX_INSNS 512
40#define MAX_FIXUPS 8 48#define MAX_FIXUPS 8
41 49
50#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
51
42struct bpf_test { 52struct bpf_test {
43 const char *descr; 53 const char *descr;
44 struct bpf_insn insns[MAX_INSNS]; 54 struct bpf_insn insns[MAX_INSNS];
@@ -53,6 +63,7 @@ struct bpf_test {
53 REJECT 63 REJECT
54 } result, result_unpriv; 64 } result, result_unpriv;
55 enum bpf_prog_type prog_type; 65 enum bpf_prog_type prog_type;
66 uint8_t flags;
56}; 67};
57 68
58/* Note we want this to be 64 bit aligned so that the end of our array is 69/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -2432,6 +2443,30 @@ static struct bpf_test tests[] = {
2432 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2443 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2433 }, 2444 },
2434 { 2445 {
2446 "direct packet access: test15 (spill with xadd)",
2447 .insns = {
2448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2449 offsetof(struct __sk_buff, data)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2451 offsetof(struct __sk_buff, data_end)),
2452 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2454 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2455 BPF_MOV64_IMM(BPF_REG_5, 4096),
2456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2458 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2459 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2461 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 .errstr = "R2 invalid mem access 'inv'",
2466 .result = REJECT,
2467 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2468 },
2469 {
2435 "helper access to packet: test1, valid packet_ptr range", 2470 "helper access to packet: test1, valid packet_ptr range",
2436 .insns = { 2471 .insns = {
2437 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2472 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2934,6 +2969,7 @@ static struct bpf_test tests[] = {
2934 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2969 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2935 .result_unpriv = REJECT, 2970 .result_unpriv = REJECT,
2936 .result = ACCEPT, 2971 .result = ACCEPT,
2972 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2937 }, 2973 },
2938 { 2974 {
2939 "valid map access into an array with a variable", 2975 "valid map access into an array with a variable",
@@ -2957,6 +2993,7 @@ static struct bpf_test tests[] = {
2957 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2993 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2958 .result_unpriv = REJECT, 2994 .result_unpriv = REJECT,
2959 .result = ACCEPT, 2995 .result = ACCEPT,
2996 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2960 }, 2997 },
2961 { 2998 {
2962 "valid map access into an array with a signed variable", 2999 "valid map access into an array with a signed variable",
@@ -2984,6 +3021,7 @@ static struct bpf_test tests[] = {
2984 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3021 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2985 .result_unpriv = REJECT, 3022 .result_unpriv = REJECT,
2986 .result = ACCEPT, 3023 .result = ACCEPT,
3024 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2987 }, 3025 },
2988 { 3026 {
2989 "invalid map access into an array with a constant", 3027 "invalid map access into an array with a constant",
@@ -3025,6 +3063,7 @@ static struct bpf_test tests[] = {
3025 .errstr = "R0 min value is outside of the array range", 3063 .errstr = "R0 min value is outside of the array range",
3026 .result_unpriv = REJECT, 3064 .result_unpriv = REJECT,
3027 .result = REJECT, 3065 .result = REJECT,
3066 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3028 }, 3067 },
3029 { 3068 {
3030 "invalid map access into an array with a variable", 3069 "invalid map access into an array with a variable",
@@ -3048,6 +3087,7 @@ static struct bpf_test tests[] = {
3048 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3087 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3049 .result_unpriv = REJECT, 3088 .result_unpriv = REJECT,
3050 .result = REJECT, 3089 .result = REJECT,
3090 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3051 }, 3091 },
3052 { 3092 {
3053 "invalid map access into an array with no floor check", 3093 "invalid map access into an array with no floor check",
@@ -3074,6 +3114,7 @@ static struct bpf_test tests[] = {
3074 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3114 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3075 .result_unpriv = REJECT, 3115 .result_unpriv = REJECT,
3076 .result = REJECT, 3116 .result = REJECT,
3117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3077 }, 3118 },
3078 { 3119 {
3079 "invalid map access into an array with a invalid max check", 3120 "invalid map access into an array with a invalid max check",
@@ -3100,6 +3141,7 @@ static struct bpf_test tests[] = {
3100 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 3141 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3101 .result_unpriv = REJECT, 3142 .result_unpriv = REJECT,
3102 .result = REJECT, 3143 .result = REJECT,
3144 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3103 }, 3145 },
3104 { 3146 {
3105 "invalid map access into an array with a invalid max check", 3147 "invalid map access into an array with a invalid max check",
@@ -3129,6 +3171,7 @@ static struct bpf_test tests[] = {
3129 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3171 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3130 .result_unpriv = REJECT, 3172 .result_unpriv = REJECT,
3131 .result = REJECT, 3173 .result = REJECT,
3174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3132 }, 3175 },
3133 { 3176 {
3134 "multiple registers share map_lookup_elem result", 3177 "multiple registers share map_lookup_elem result",
@@ -3252,6 +3295,7 @@ static struct bpf_test tests[] = {
3252 .result = REJECT, 3295 .result = REJECT,
3253 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3296 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3254 .result_unpriv = REJECT, 3297 .result_unpriv = REJECT,
3298 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3255 }, 3299 },
3256 { 3300 {
3257 "constant register |= constant should keep constant type", 3301 "constant register |= constant should keep constant type",
@@ -3418,6 +3462,26 @@ static struct bpf_test tests[] = {
3418 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3462 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3419 }, 3463 },
3420 { 3464 {
3465 "overlapping checks for direct packet access",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3476 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3477 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3478 BPF_MOV64_IMM(BPF_REG_0, 0),
3479 BPF_EXIT_INSN(),
3480 },
3481 .result = ACCEPT,
3482 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3483 },
3484 {
3421 "invalid access of tc_classid for LWT_IN", 3485 "invalid access of tc_classid for LWT_IN",
3422 .insns = { 3486 .insns = {
3423 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -3961,7 +4025,208 @@ static struct bpf_test tests[] = {
3961 .result_unpriv = REJECT, 4025 .result_unpriv = REJECT,
3962 }, 4026 },
3963 { 4027 {
3964 "map element value (adjusted) is preserved across register spilling", 4028 "map element value or null is marked on register spilling",
4029 .insns = {
4030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4032 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4033 BPF_LD_MAP_FD(BPF_REG_1, 0),
4034 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4037 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4039 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4040 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4041 BPF_EXIT_INSN(),
4042 },
4043 .fixup_map2 = { 3 },
4044 .errstr_unpriv = "R0 leaks addr",
4045 .result = ACCEPT,
4046 .result_unpriv = REJECT,
4047 },
4048 {
4049 "map element value store of cleared call register",
4050 .insns = {
4051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4053 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4054 BPF_LD_MAP_FD(BPF_REG_1, 0),
4055 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4058 BPF_EXIT_INSN(),
4059 },
4060 .fixup_map2 = { 3 },
4061 .errstr_unpriv = "R1 !read_ok",
4062 .errstr = "R1 !read_ok",
4063 .result = REJECT,
4064 .result_unpriv = REJECT,
4065 },
4066 {
4067 "map element value with unaligned store",
4068 .insns = {
4069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4071 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4072 BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4074 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4076 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4079 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4081 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4085 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4087 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4089 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4090 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4092 BPF_EXIT_INSN(),
4093 },
4094 .fixup_map2 = { 3 },
4095 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4096 .result = ACCEPT,
4097 .result_unpriv = REJECT,
4098 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4099 },
4100 {
4101 "map element value with unaligned load",
4102 .insns = {
4103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4106 BPF_LD_MAP_FD(BPF_REG_1, 0),
4107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4109 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4110 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4112 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4113 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4114 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4116 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4119 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4120 BPF_EXIT_INSN(),
4121 },
4122 .fixup_map2 = { 3 },
4123 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4124 .result = ACCEPT,
4125 .result_unpriv = REJECT,
4126 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4127 },
4128 {
4129 "map element value illegal alu op, 1",
4130 .insns = {
4131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4133 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4134 BPF_LD_MAP_FD(BPF_REG_1, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4137 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4138 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4139 BPF_EXIT_INSN(),
4140 },
4141 .fixup_map2 = { 3 },
4142 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4143 .errstr = "invalid mem access 'inv'",
4144 .result = REJECT,
4145 .result_unpriv = REJECT,
4146 },
4147 {
4148 "map element value illegal alu op, 2",
4149 .insns = {
4150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4152 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4153 BPF_LD_MAP_FD(BPF_REG_1, 0),
4154 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4156 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4157 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4158 BPF_EXIT_INSN(),
4159 },
4160 .fixup_map2 = { 3 },
4161 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4162 .errstr = "invalid mem access 'inv'",
4163 .result = REJECT,
4164 .result_unpriv = REJECT,
4165 },
4166 {
4167 "map element value illegal alu op, 3",
4168 .insns = {
4169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4171 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4172 BPF_LD_MAP_FD(BPF_REG_1, 0),
4173 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4175 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4176 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4177 BPF_EXIT_INSN(),
4178 },
4179 .fixup_map2 = { 3 },
4180 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4181 .errstr = "invalid mem access 'inv'",
4182 .result = REJECT,
4183 .result_unpriv = REJECT,
4184 },
4185 {
4186 "map element value illegal alu op, 4",
4187 .insns = {
4188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4190 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4191 BPF_LD_MAP_FD(BPF_REG_1, 0),
4192 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4194 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map2 = { 3 },
4199 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4200 .errstr = "invalid mem access 'inv'",
4201 .result = REJECT,
4202 .result_unpriv = REJECT,
4203 },
4204 {
4205 "map element value illegal alu op, 5",
4206 .insns = {
4207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4213 BPF_MOV64_IMM(BPF_REG_3, 4096),
4214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4216 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4217 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4220 BPF_EXIT_INSN(),
4221 },
4222 .fixup_map2 = { 3 },
4223 .errstr_unpriv = "R0 invalid mem access 'inv'",
4224 .errstr = "R0 invalid mem access 'inv'",
4225 .result = REJECT,
4226 .result_unpriv = REJECT,
4227 },
4228 {
4229 "map element value is preserved across register spilling",
3965 .insns = { 4230 .insns = {
3966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3983,6 +4248,7 @@ static struct bpf_test tests[] = {
3983 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4248 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3984 .result = ACCEPT, 4249 .result = ACCEPT,
3985 .result_unpriv = REJECT, 4250 .result_unpriv = REJECT,
4251 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3986 }, 4252 },
3987 { 4253 {
3988 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 4254 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4421,6 +4687,7 @@ static struct bpf_test tests[] = {
4421 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4687 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4422 .result = REJECT, 4688 .result = REJECT,
4423 .result_unpriv = REJECT, 4689 .result_unpriv = REJECT,
4690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4424 }, 4691 },
4425 { 4692 {
4426 "invalid range check", 4693 "invalid range check",
@@ -4452,6 +4719,7 @@ static struct bpf_test tests[] = {
4452 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4719 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4453 .result = REJECT, 4720 .result = REJECT,
4454 .result_unpriv = REJECT, 4721 .result_unpriv = REJECT,
4722 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4455 } 4723 }
4456}; 4724};
4457 4725
@@ -4530,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4530static void do_test_single(struct bpf_test *test, bool unpriv, 4798static void do_test_single(struct bpf_test *test, bool unpriv,
4531 int *passes, int *errors) 4799 int *passes, int *errors)
4532{ 4800{
4801 int fd_prog, expected_ret, reject_from_alignment;
4533 struct bpf_insn *prog = test->insns; 4802 struct bpf_insn *prog = test->insns;
4534 int prog_len = probe_filter_length(prog); 4803 int prog_len = probe_filter_length(prog);
4535 int prog_type = test->prog_type; 4804 int prog_type = test->prog_type;
4536 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; 4805 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4537 int fd_prog, expected_ret;
4538 const char *expected_err; 4806 const char *expected_err;
4539 4807
4540 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 4808 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
@@ -4547,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4547 test->result_unpriv : test->result; 4815 test->result_unpriv : test->result;
4548 expected_err = unpriv && test->errstr_unpriv ? 4816 expected_err = unpriv && test->errstr_unpriv ?
4549 test->errstr_unpriv : test->errstr; 4817 test->errstr_unpriv : test->errstr;
4818
4819 reject_from_alignment = fd_prog < 0 &&
4820 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4821 strstr(bpf_vlog, "Unknown alignment.");
4822#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4823 if (reject_from_alignment) {
4824 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4825 strerror(errno));
4826 goto fail_log;
4827 }
4828#endif
4550 if (expected_ret == ACCEPT) { 4829 if (expected_ret == ACCEPT) {
4551 if (fd_prog < 0) { 4830 if (fd_prog < 0 && !reject_from_alignment) {
4552 printf("FAIL\nFailed to load prog '%s'!\n", 4831 printf("FAIL\nFailed to load prog '%s'!\n",
4553 strerror(errno)); 4832 strerror(errno));
4554 goto fail_log; 4833 goto fail_log;
@@ -4558,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4558 printf("FAIL\nUnexpected success to load!\n"); 4837 printf("FAIL\nUnexpected success to load!\n");
4559 goto fail_log; 4838 goto fail_log;
4560 } 4839 }
4561 if (!strstr(bpf_vlog, expected_err)) { 4840 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
4562 printf("FAIL\nUnexpected error message!\n"); 4841 printf("FAIL\nUnexpected error message!\n");
4563 goto fail_log; 4842 goto fail_log;
4564 } 4843 }
4565 } 4844 }
4566 4845
4567 (*passes)++; 4846 (*passes)++;
4568 printf("OK\n"); 4847 printf("OK%s\n", reject_from_alignment ?
4848 " (NOTE: reject due to unknown alignment)" : "");
4569close_fds: 4849close_fds:
4570 close(fd_prog); 4850 close(fd_prog);
4571 close(fd_f1); 4851 close(fd_f1);
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
new file mode 100644
index 000000000000..bab5ff7c607e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -0,0 +1,117 @@
1#!/bin/sh
2# description: ftrace - function pid filters
3
4# Make sure that function pid matching filter works.
5# Also test it on an instance directory
6
7if ! grep -q function available_tracers; then
8 echo "no function tracer configured"
9 exit_unsupported
10fi
11
12if [ ! -f set_ftrace_pid ]; then
13 echo "set_ftrace_pid not found? Is function tracer not set?"
14 exit_unsupported
15fi
16
17if [ ! -f set_ftrace_filter ]; then
18 echo "set_ftrace_filter not found? Is function tracer not set?"
19 exit_unsupported
20fi
21
22do_function_fork=1
23
24if [ ! -f options/function-fork ]; then
25 do_function_fork=0
26 echo "no option for function-fork found. Option will not be tested."
27fi
28
29read PID _ < /proc/self/stat
30
31if [ $do_function_fork -eq 1 ]; then
32 # default value of function-fork option
33 orig_value=`grep function-fork trace_options`
34fi
35
36do_reset() {
37 reset_tracer
38 clear_trace
39 enable_tracing
40 echo > set_ftrace_filter
41 echo > set_ftrace_pid
42
43 if [ $do_function_fork -eq 0 ]; then
44 return
45 fi
46
47 echo $orig_value > trace_options
48}
49
50fail() { # msg
51 do_reset
52 echo $1
53 exit $FAIL
54}
55
56yield() {
57 ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
58}
59
60do_test() {
61 disable_tracing
62
63 echo do_execve* > set_ftrace_filter
64 echo *do_fork >> set_ftrace_filter
65
66 echo $PID > set_ftrace_pid
67 echo function > current_tracer
68
69 if [ $do_function_fork -eq 1 ]; then
70 # don't allow children to be traced
71 echo nofunction-fork > trace_options
72 fi
73
74 enable_tracing
75 yield
76
77 count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
78 count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
79
80 # count_other should be 0
81 if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
82 fail "PID filtering not working?"
83 fi
84
85 disable_tracing
86 clear_trace
87
88 if [ $do_function_fork -eq 0 ]; then
89 return
90 fi
91
92 # allow children to be traced
93 echo function-fork > trace_options
94
95 enable_tracing
96 yield
97
98 count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
99 count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
100
101 # count_other should NOT be 0
102 if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
103 fail "PID filtering not following fork?"
104 fi
105}
106
107do_test
108
109mkdir instances/foo
110cd instances/foo
111do_test
112cd ../../
113rmdir instances/foo
114
115do_reset
116
117exit 0
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 412459369686..e62bb354820c 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
75{ 75{
76 int fd, val; 76 int fd, val;
77 77
78 fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP)); 78 fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
79 if (fd < 0) { 79 if (fd < 0) {
80 perror("socket packet"); 80 perror("socket packet");
81 exit(1); 81 exit(1);
@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
95 return fd; 95 return fd;
96} 96}
97 97
98static void sock_fanout_set_cbpf(int fd)
99{
100 struct sock_filter bpf_filter[] = {
101 BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80), /* ldb [80] */
102 BPF_STMT(BPF_RET+BPF_A, 0), /* ret A */
103 };
104 struct sock_fprog bpf_prog;
105
106 bpf_prog.filter = bpf_filter;
107 bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
108
109 if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
110 sizeof(bpf_prog))) {
111 perror("fanout data cbpf");
112 exit(1);
113 }
114}
115
98static void sock_fanout_set_ebpf(int fd) 116static void sock_fanout_set_ebpf(int fd)
99{ 117{
100 const int len_off = __builtin_offsetof(struct __sk_buff, len); 118 const int len_off = __builtin_offsetof(struct __sk_buff, len);
@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
270 exit(1); 288 exit(1);
271 } 289 }
272 if (type == PACKET_FANOUT_CBPF) 290 if (type == PACKET_FANOUT_CBPF)
273 sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA); 291 sock_fanout_set_cbpf(fds[0]);
274 else if (type == PACKET_FANOUT_EBPF) 292 else if (type == PACKET_FANOUT_EBPF)
275 sock_fanout_set_ebpf(fds[0]); 293 sock_fanout_set_ebpf(fds[0]);
276 294
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index a77da88bf946..7d990d6c861b 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -38,7 +38,7 @@
38# define __maybe_unused __attribute__ ((__unused__)) 38# define __maybe_unused __attribute__ ((__unused__))
39#endif 39#endif
40 40
41static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum) 41static __maybe_unused void pair_udp_setfilter(int fd)
42{ 42{
43 /* the filter below checks for all of the following conditions that 43 /* the filter below checks for all of the following conditions that
44 * are based on the contents of create_payload() 44 * are based on the contents of create_payload()
@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
76 }; 76 };
77 struct sock_fprog bpf_prog; 77 struct sock_fprog bpf_prog;
78 78
79 if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
80 bpf_filter[5].code = 0x16; /* RET A */
81
82 bpf_prog.filter = bpf_filter; 79 bpf_prog.filter = bpf_filter;
83 bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); 80 bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
84 if (setsockopt(fd, lvl, optnum, &bpf_prog, 81
82 if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
85 sizeof(bpf_prog))) { 83 sizeof(bpf_prog))) {
86 perror("setsockopt SO_ATTACH_FILTER"); 84 perror("setsockopt SO_ATTACH_FILTER");
87 exit(1); 85 exit(1);
88 } 86 }
89} 87}
90 88
91static __maybe_unused void pair_udp_setfilter(int fd)
92{
93 sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
94}
95
96static __maybe_unused void pair_udp_open(int fds[], uint16_t port) 89static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
97{ 90{
98 struct sockaddr_in saddr, daddr; 91 struct sockaddr_in saddr, daddr;
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
34all: $(SUB_DIRS) 34all: $(SUB_DIRS)
35 35
36$(SUB_DIRS): 36$(SUB_DIRS):
37 BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all 37 BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
38 38
39include ../lib.mk 39include ../lib.mk
40 40
41override define RUN_TESTS 41override define RUN_TESTS
42 @for TARGET in $(SUB_DIRS); do \ 42 @for TARGET in $(SUB_DIRS); do \
43 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 43 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ 44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
45 done; 45 done;
46endef 46endef
47 47
48override define INSTALL_RULE 48override define INSTALL_RULE
49 @for TARGET in $(SUB_DIRS); do \ 49 @for TARGET in $(SUB_DIRS); do \
50 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 50 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ 51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
52 done; 52 done;
53endef 53endef
54 54
55override define EMIT_TESTS 55override define EMIT_TESTS
56 @for TARGET in $(SUB_DIRS); do \ 56 @for TARGET in $(SUB_DIRS); do \
57 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 57 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ 58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
59 done; 59 done;
60endef 60endef
61 61
62clean: 62clean:
63 @for TARGET in $(SUB_DIRS); do \ 63 @for TARGET in $(SUB_DIRS); do \
64 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 64 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ 65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
66 done; 66 done;
67 rm -f tags 67 rm -f tags
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
392} 392}
393 393
394/** 394/**
395 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
396 *
397 * For a specific CPU, initialize the GIC VE hardware.
398 */
399void kvm_vgic_init_cpu_hardware(void)
400{
401 BUG_ON(preemptible());
402
403 /*
404 * We want to make sure the list registers start out clear so that we
405 * only have the program the used registers.
406 */
407 if (kvm_vgic_global_state.type == VGIC_V2)
408 vgic_v2_init_lrs();
409 else
410 kvm_call_hyp(__vgic_v3_init_lrs);
411}
412
413/**
395 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable 414 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
396 * according to the host GIC model. Accordingly calls either 415 * according to the host GIC model. Accordingly calls either
397 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be 416 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
229 val = vmcr.ctlr; 229 val = vmcr.ctlr;
230 break; 230 break;
231 case GIC_CPU_PRIMASK: 231 case GIC_CPU_PRIMASK:
232 val = vmcr.pmr; 232 /*
233 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
234 * the PMR field as GICH_VMCR.VMPriMask rather than
235 * GICC_PMR.Priority, so we expose the upper five bits of
236 * priority mask to userspace using the lower bits in the
237 * unsigned long.
238 */
239 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
240 GICV_PMR_PRIORITY_SHIFT;
233 break; 241 break;
234 case GIC_CPU_BINPOINT: 242 case GIC_CPU_BINPOINT:
235 val = vmcr.bpr; 243 val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
262 vmcr.ctlr = val; 270 vmcr.ctlr = val;
263 break; 271 break;
264 case GIC_CPU_PRIMASK: 272 case GIC_CPU_PRIMASK:
265 vmcr.pmr = val; 273 /*
274 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
275 * the PMR field as GICH_VMCR.VMPriMask rather than
276 * GICC_PMR.Priority, so we expose the upper five bits of
277 * priority mask to userspace using the lower bits in the
278 * unsigned long.
279 */
280 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
281 GICV_PMR_PRIORITY_MASK;
266 break; 282 break;
267 case GIC_CPU_BINPOINT: 283 case GIC_CPU_BINPOINT:
268 vmcr.bpr = val; 284 vmcr.bpr = val;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
36 return (unsigned long *)val; 36 return (unsigned long *)val;
37} 37}
38 38
39static inline void vgic_v2_write_lr(int lr, u32 val)
40{
41 void __iomem *base = kvm_vgic_global_state.vctrl_base;
42
43 writel_relaxed(val, base + GICH_LR0 + (lr * 4));
44}
45
46void vgic_v2_init_lrs(void)
47{
48 int i;
49
50 for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
51 vgic_v2_write_lr(i, 0);
52}
53
39void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) 54void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
40{ 55{
41 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 56 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
191 GICH_VMCR_ALIAS_BINPOINT_MASK; 206 GICH_VMCR_ALIAS_BINPOINT_MASK;
192 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 207 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
193 GICH_VMCR_BINPOINT_MASK; 208 GICH_VMCR_BINPOINT_MASK;
194 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & 209 vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
195 GICH_VMCR_PRIMASK_MASK; 210 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
196 211
197 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 212 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
198} 213}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
207 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 222 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
208 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 223 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
209 GICH_VMCR_BINPOINT_SHIFT; 224 GICH_VMCR_BINPOINT_SHIFT;
210 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> 225 vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
211 GICH_VMCR_PRIMASK_SHIFT; 226 GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
212} 227}
213 228
214void vgic_v2_enable(struct kvm_vcpu *vcpu) 229void vgic_v2_enable(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
81 return irq->pending_latch || irq->line_level; 81 return irq->pending_latch || irq->line_level;
82} 82}
83 83
84/*
85 * This struct provides an intermediate representation of the fields contained
86 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
87 * state to userspace can generate either GICv2 or GICv3 CPU interface
88 * registers regardless of the hardware backed GIC used.
89 */
84struct vgic_vmcr { 90struct vgic_vmcr {
85 u32 ctlr; 91 u32 ctlr;
86 u32 abpr; 92 u32 abpr;
87 u32 bpr; 93 u32 bpr;
88 u32 pmr; 94 u32 pmr; /* Priority mask field in the GICC_PMR and
95 * ICC_PMR_EL1 priority field format */
89 /* Below member variable are valid only for GICv3 */ 96 /* Below member variable are valid only for GICv3 */
90 u32 grpen0; 97 u32 grpen0;
91 u32 grpen1; 98 u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
130int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, 137int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
131 enum vgic_type); 138 enum vgic_type);
132 139
140void vgic_v2_init_lrs(void);
141
133static inline void vgic_get_irq_kref(struct vgic_irq *irq) 142static inline void vgic_get_irq_kref(struct vgic_irq *irq)
134{ 143{
135 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786dd9522..4d28a9ddbee0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
870 continue; 870 continue;
871 871
872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
873 kvm->buses[bus_idx]->ioeventfd_count--; 873 if (kvm->buses[bus_idx])
874 kvm->buses[bus_idx]->ioeventfd_count--;
874 ioeventfd_release(p); 875 ioeventfd_release(p);
875 ret = 0; 876 ret = 0;
876 break; 877 break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a17d78759727..88257b311cb5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
727 list_del(&kvm->vm_list); 727 list_del(&kvm->vm_list);
728 spin_unlock(&kvm_lock); 728 spin_unlock(&kvm_lock);
729 kvm_free_irq_routing(kvm); 729 kvm_free_irq_routing(kvm);
730 for (i = 0; i < KVM_NR_BUSES; i++) 730 for (i = 0; i < KVM_NR_BUSES; i++) {
731 kvm_io_bus_destroy(kvm->buses[i]); 731 if (kvm->buses[i])
732 kvm_io_bus_destroy(kvm->buses[i]);
733 kvm->buses[i] = NULL;
734 }
732 kvm_coalesced_mmio_free(kvm); 735 kvm_coalesced_mmio_free(kvm);
733#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 736#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
734 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 737 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
1062 * changes) is disallowed above, so any other attribute changes getting 1065 * changes) is disallowed above, so any other attribute changes getting
1063 * here can be skipped. 1066 * here can be skipped.
1064 */ 1067 */
1065 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1068 if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
1066 r = kvm_iommu_map_pages(kvm, &new); 1069 r = kvm_iommu_map_pages(kvm, &new);
1067 return r; 1070 return r;
1068 } 1071 }
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3474 }; 3477 };
3475 3478
3476 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3479 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3480 if (!bus)
3481 return -ENOMEM;
3477 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3482 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3478 return r < 0 ? r : 0; 3483 return r < 0 ? r : 0;
3479} 3484}
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3491 }; 3496 };
3492 3497
3493 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3498 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3499 if (!bus)
3500 return -ENOMEM;
3494 3501
3495 /* First try the device referenced by cookie. */ 3502 /* First try the device referenced by cookie. */
3496 if ((cookie >= 0) && (cookie < bus->dev_count) && 3503 if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3541 }; 3548 };
3542 3549
3543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3550 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3551 if (!bus)
3552 return -ENOMEM;
3544 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3553 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3545 return r < 0 ? r : 0; 3554 return r < 0 ? r : 0;
3546} 3555}
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3553 struct kvm_io_bus *new_bus, *bus; 3562 struct kvm_io_bus *new_bus, *bus;
3554 3563
3555 bus = kvm->buses[bus_idx]; 3564 bus = kvm->buses[bus_idx];
3565 if (!bus)
3566 return -ENOMEM;
3567
3556 /* exclude ioeventfd which is limited by maximum fd */ 3568 /* exclude ioeventfd which is limited by maximum fd */
3557 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3569 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3558 return -ENOSPC; 3570 return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3572} 3584}
3573 3585
3574/* Caller must hold slots_lock. */ 3586/* Caller must hold slots_lock. */
3575int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3587void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3576 struct kvm_io_device *dev) 3588 struct kvm_io_device *dev)
3577{ 3589{
3578 int i, r; 3590 int i;
3579 struct kvm_io_bus *new_bus, *bus; 3591 struct kvm_io_bus *new_bus, *bus;
3580 3592
3581 bus = kvm->buses[bus_idx]; 3593 bus = kvm->buses[bus_idx];
3582 r = -ENOENT; 3594 if (!bus)
3595 return;
3596
3583 for (i = 0; i < bus->dev_count; i++) 3597 for (i = 0; i < bus->dev_count; i++)
3584 if (bus->range[i].dev == dev) { 3598 if (bus->range[i].dev == dev) {
3585 r = 0;
3586 break; 3599 break;
3587 } 3600 }
3588 3601
3589 if (r) 3602 if (i == bus->dev_count)
3590 return r; 3603 return;
3591 3604
3592 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3605 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3593 sizeof(struct kvm_io_range)), GFP_KERNEL); 3606 sizeof(struct kvm_io_range)), GFP_KERNEL);
3594 if (!new_bus) 3607 if (!new_bus) {
3595 return -ENOMEM; 3608 pr_err("kvm: failed to shrink bus, removing it completely\n");
3609 goto broken;
3610 }
3596 3611
3597 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3612 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3598 new_bus->dev_count--; 3613 new_bus->dev_count--;
3599 memcpy(new_bus->range + i, bus->range + i + 1, 3614 memcpy(new_bus->range + i, bus->range + i + 1,
3600 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3615 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3601 3616
3617broken:
3602 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3618 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3603 synchronize_srcu_expedited(&kvm->srcu); 3619 synchronize_srcu_expedited(&kvm->srcu);
3604 kfree(bus); 3620 kfree(bus);
3605 return r; 3621 return;
3606} 3622}
3607 3623
3608struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3624struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3615 srcu_idx = srcu_read_lock(&kvm->srcu); 3631 srcu_idx = srcu_read_lock(&kvm->srcu);
3616 3632
3617 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3633 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3634 if (!bus)
3635 goto out_unlock;
3618 3636
3619 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3637 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3620 if (dev_idx < 0) 3638 if (dev_idx < 0)